-rw-r--r--  tensorflow/compiler/aot/codegen.cc | 44
-rw-r--r--  tensorflow/compiler/aot/codegen.h | 4
-rw-r--r--  tensorflow/compiler/aot/codegen_test.cc | 2
-rw-r--r--  tensorflow/compiler/aot/embedded_protocol_buffers.cc | 24
-rw-r--r--  tensorflow/compiler/aot/embedded_protocol_buffers.h | 2
-rw-r--r--  tensorflow/compiler/aot/tests/BUILD | 7
-rw-r--r--  tensorflow/compiler/aot/tfcompile.bzl | 59
-rw-r--r--  tensorflow/compiler/aot/tfcompile_main.cc | 7
-rw-r--r--  tensorflow/compiler/jit/BUILD | 2
-rw-r--r--  tensorflow/compiler/jit/deadness_analysis.cc | 12
-rw-r--r--  tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc | 57
-rw-r--r--  tensorflow/compiler/jit/encapsulate_subgraphs_pass_test.cc | 120
-rw-r--r--  tensorflow/compiler/jit/graphcycles/BUILD | 1
-rw-r--r--  tensorflow/compiler/jit/graphcycles/graphcycles.cc | 4
-rw-r--r--  tensorflow/compiler/jit/mark_for_compilation_pass.cc | 40
-rw-r--r--  tensorflow/compiler/jit/mark_for_compilation_pass_test.cc | 2
-rw-r--r--  tensorflow/compiler/jit/partially_decluster_pass.cc | 14
-rw-r--r--  tensorflow/compiler/jit/resource_operation_safety_analysis.cc | 6
-rw-r--r--  tensorflow/compiler/jit/xla_cluster_util.cc | 9
-rw-r--r--  tensorflow/compiler/jit/xla_cluster_util.h | 2
-rw-r--r--  tensorflow/compiler/jit/xla_compilation_cache.cc | 10
-rw-r--r--  tensorflow/compiler/jit/xla_device.cc | 5
-rw-r--r--  tensorflow/compiler/jit/xla_device_context.cc | 4
-rw-r--r--  tensorflow/compiler/jit/xla_device_context.h | 4
-rw-r--r--  tensorflow/compiler/jit/xla_device_ops.h | 18
-rw-r--r--  tensorflow/compiler/jit/xla_fusion_optimizer.cc | 3
-rw-r--r--  tensorflow/compiler/jit/xla_tensor.h | 2
-rw-r--r--  tensorflow/compiler/tests/BUILD | 1
-rw-r--r--  tensorflow/compiler/tests/randomized_tests.cc | 50
-rw-r--r--  tensorflow/compiler/tests/xla_ops_test.py | 10
-rw-r--r--  tensorflow/compiler/tf2xla/BUILD | 5
-rw-r--r--  tensorflow/compiler/tf2xla/dump_graph.cc | 8
-rw-r--r--  tensorflow/compiler/tf2xla/functionalize_cond.cc | 36
-rw-r--r--  tensorflow/compiler/tf2xla/functionalize_control_flow_util.cc | 2
-rw-r--r--  tensorflow/compiler/tf2xla/functionalize_control_flow_util.h | 13
-rw-r--r--  tensorflow/compiler/tf2xla/functionalize_while.cc | 6
-rw-r--r--  tensorflow/compiler/tf2xla/graph_compiler.cc | 2
-rw-r--r--  tensorflow/compiler/tf2xla/graph_compiler.h | 2
-rw-r--r--  tensorflow/compiler/tf2xla/kernels/batchtospace_op.cc | 2
-rw-r--r--  tensorflow/compiler/tf2xla/kernels/bcast_ops.cc | 4
-rw-r--r--  tensorflow/compiler/tf2xla/kernels/depthtospace_op.cc | 2
-rw-r--r--  tensorflow/compiler/tf2xla/kernels/pooling_ops.cc | 2
-rw-r--r--  tensorflow/compiler/tf2xla/kernels/reduction_ops_common.cc | 4
-rw-r--r--  tensorflow/compiler/tf2xla/kernels/reverse_op.cc | 2
-rw-r--r--  tensorflow/compiler/tf2xla/kernels/shape_op.cc | 2
-rw-r--r--  tensorflow/compiler/tf2xla/kernels/spacetobatch_op.cc | 2
-rw-r--r--  tensorflow/compiler/tf2xla/kernels/spacetodepth_op.cc | 2
-rw-r--r--  tensorflow/compiler/tf2xla/kernels/stack_ops.cc | 2
-rw-r--r--  tensorflow/compiler/tf2xla/kernels/strided_slice_op.cc | 28
-rw-r--r--  tensorflow/compiler/tf2xla/kernels/tensor_array_ops.cc | 2
-rw-r--r--  tensorflow/compiler/tf2xla/kernels/transpose_op.cc | 2
-rw-r--r--  tensorflow/compiler/tf2xla/kernels/xla_conv_op.cc | 2
-rw-r--r--  tensorflow/compiler/tf2xla/kernels/xla_dot_op.cc | 2
-rw-r--r--  tensorflow/compiler/tf2xla/lib/BUILD | 2
-rw-r--r--  tensorflow/compiler/tf2xla/lib/batch_dot.cc | 4
-rw-r--r--  tensorflow/compiler/tf2xla/lib/batch_dot.h | 10
-rw-r--r--  tensorflow/compiler/tf2xla/lib/cholesky.cc | 4
-rw-r--r--  tensorflow/compiler/tf2xla/lib/cholesky.h | 6
-rw-r--r--  tensorflow/compiler/tf2xla/lib/qr.cc | 6
-rw-r--r--  tensorflow/compiler/tf2xla/lib/qr.h | 3
-rw-r--r--  tensorflow/compiler/tf2xla/lib/triangular_solve.cc | 12
-rw-r--r--  tensorflow/compiler/tf2xla/lib/triangular_solve.h | 9
-rw-r--r--  tensorflow/compiler/tf2xla/lib/while_loop.cc | 8
-rw-r--r--  tensorflow/compiler/tf2xla/lib/while_loop.h | 6
-rw-r--r--  tensorflow/compiler/tf2xla/ops/xla_ops.cc | 4
-rw-r--r--  tensorflow/compiler/tf2xla/resource_operation_table.cc | 22
-rw-r--r--  tensorflow/compiler/tf2xla/resource_operation_table.h | 8
-rw-r--r--  tensorflow/compiler/tf2xla/resource_operation_table_test.cc | 2
-rw-r--r--  tensorflow/compiler/tf2xla/sharding_util.cc | 1
-rw-r--r--  tensorflow/compiler/tf2xla/tf2xla.cc | 10
-rw-r--r--  tensorflow/compiler/tf2xla/tf2xla_util.cc | 10
-rw-r--r--  tensorflow/compiler/tf2xla/tf2xla_util.h | 2
-rw-r--r--  tensorflow/compiler/tf2xla/tf2xla_util_test.cc | 6
-rw-r--r--  tensorflow/compiler/tf2xla/xla_compilation_device.cc | 11
-rw-r--r--  tensorflow/compiler/tf2xla/xla_compiler.cc | 19
-rw-r--r--  tensorflow/compiler/tf2xla/xla_context.cc | 1
-rw-r--r--  tensorflow/compiler/tf2xla/xla_op_kernel.cc | 26
-rw-r--r--  tensorflow/compiler/tf2xla/xla_op_kernel.h | 26
-rw-r--r--  tensorflow/compiler/tf2xla/xla_op_registry.cc | 20
-rw-r--r--  tensorflow/compiler/tf2xla/xla_op_registry.h | 16
-rw-r--r--  tensorflow/compiler/tf2xla/xla_resource.cc | 4
-rw-r--r--  tensorflow/compiler/xla/client/xla_builder.cc | 82
-rw-r--r--  tensorflow/compiler/xla/client/xla_builder.h | 97
-rw-r--r--  tensorflow/compiler/xla/reference_util.cc | 4
-rw-r--r--  tensorflow/compiler/xla/service/BUILD | 50
-rw-r--r--  tensorflow/compiler/xla/service/algebraic_simplifier_test.cc | 18
-rw-r--r--  tensorflow/compiler/xla/service/bfloat16_normalization_test.cc | 4
-rw-r--r--  tensorflow/compiler/xla/service/buffer_assignment.cc | 28
-rw-r--r--  tensorflow/compiler/xla/service/buffer_assignment_test.cc | 102
-rw-r--r--  tensorflow/compiler/xla/service/buffer_liveness_test.cc | 42
-rw-r--r--  tensorflow/compiler/xla/service/cpu/conv_canonicalization_test.cc | 7
-rw-r--r--  tensorflow/compiler/xla/service/cpu/cpu_compiler.cc | 56
-rw-r--r--  tensorflow/compiler/xla/service/cpu/cpu_instruction_fusion_test.cc | 4
-rw-r--r--  tensorflow/compiler/xla/service/cpu/ir_emitter.cc | 2
-rw-r--r--  tensorflow/compiler/xla/service/cpu/ir_emitter.h | 2
-rw-r--r--  tensorflow/compiler/xla/service/gpu/BUILD | 1
-rw-r--r--  tensorflow/compiler/xla/service/gpu/cudnn_convolution_rewriter_test.cc | 7
-rw-r--r--  tensorflow/compiler/xla/service/gpu/gpu_hlo_schedule.cc | 6
-rw-r--r--  tensorflow/compiler/xla/service/gpu/gpu_hlo_schedule.h | 4
-rw-r--r--  tensorflow/compiler/xla/service/gpu/multi_output_fusion_test.cc | 10
-rw-r--r--  tensorflow/compiler/xla/service/graphviz_example.cc | 4
-rw-r--r--  tensorflow/compiler/xla/service/heap_simulator.cc | 43
-rw-r--r--  tensorflow/compiler/xla/service/heap_simulator.h | 48
-rw-r--r--  tensorflow/compiler/xla/service/heap_simulator_test.cc | 43
-rw-r--r--  tensorflow/compiler/xla/service/hlo.proto | 2
-rw-r--r--  tensorflow/compiler/xla/service/hlo_alias_analysis_test.cc | 16
-rw-r--r--  tensorflow/compiler/xla/service/hlo_computation_test.cc | 12
-rw-r--r--  tensorflow/compiler/xla/service/hlo_creation_utils.cc | 9
-rw-r--r--  tensorflow/compiler/xla/service/hlo_creation_utils.h | 9
-rw-r--r--  tensorflow/compiler/xla/service/hlo_cse.cc | 2
-rw-r--r--  tensorflow/compiler/xla/service/hlo_dataflow_analysis_test.cc | 33
-rw-r--r--  tensorflow/compiler/xla/service/hlo_evaluator.cc | 2
-rw-r--r--  tensorflow/compiler/xla/service/hlo_evaluator.h | 2
-rw-r--r--  tensorflow/compiler/xla/service/hlo_evaluator_test.cc | 7
-rw-r--r--  tensorflow/compiler/xla/service/hlo_instruction.cc | 47
-rw-r--r--  tensorflow/compiler/xla/service/hlo_instruction.h | 16
-rw-r--r--  tensorflow/compiler/xla/service/hlo_instruction_test.cc | 13
-rw-r--r--  tensorflow/compiler/xla/service/hlo_instructions.cc | 2
-rw-r--r--  tensorflow/compiler/xla/service/hlo_instructions.h | 2
-rw-r--r--  tensorflow/compiler/xla/service/hlo_ordering.cc | 86
-rw-r--r--  tensorflow/compiler/xla/service/hlo_ordering.h | 22
-rw-r--r--  tensorflow/compiler/xla/service/hlo_ordering_test.cc | 101
-rw-r--r--  tensorflow/compiler/xla/service/hlo_parser.cc | 26
-rw-r--r--  tensorflow/compiler/xla/service/hlo_rematerialization.cc | 87
-rw-r--r--  tensorflow/compiler/xla/service/hlo_rematerialization.h | 19
-rw-r--r--  tensorflow/compiler/xla/service/hlo_rematerialization_test.cc | 46
-rw-r--r--  tensorflow/compiler/xla/service/hlo_schedule.cc | 291
-rw-r--r--  tensorflow/compiler/xla/service/hlo_schedule.h | 151
-rw-r--r--  tensorflow/compiler/xla/service/hlo_schedule_test.cc | 341
-rw-r--r--  tensorflow/compiler/xla/service/hlo_scheduling.cc | 230
-rw-r--r--  tensorflow/compiler/xla/service/hlo_scheduling.h | 54
-rw-r--r--  tensorflow/compiler/xla/service/hlo_scheduling_test.cc | 343
-rw-r--r--  tensorflow/compiler/xla/service/indexed_array_analysis.cc | 8
-rw-r--r--  tensorflow/compiler/xla/service/indexed_array_analysis.h | 13
-rw-r--r--  tensorflow/compiler/xla/service/transpose_folding_test.cc | 7
-rw-r--r--  tensorflow/compiler/xla/service/tuple_points_to_analysis_test.cc | 4
-rw-r--r--  tensorflow/compiler/xla/service/while_loop_constant_sinking.cc | 2
-rw-r--r--  tensorflow/compiler/xla/tests/hlo_test_base.cc | 8
-rw-r--r--  tensorflow/compiler/xla/tests/hlo_test_base.h | 2
-rw-r--r--  tensorflow/compiler/xla/tests/multioutput_fusion_test.cc | 7
-rw-r--r--  tensorflow/compiler/xla/xla_data.proto | 2
-rw-r--r--  tensorflow/compiler/xrt/BUILD | 1
-rw-r--r--  tensorflow/compiler/xrt/kernels/BUILD | 5
-rw-r--r--  tensorflow/compiler/xrt/kernels/xrt_compile_ops.cc | 4
-rw-r--r--  tensorflow/compiler/xrt/xrt_state.cc | 8
-rw-r--r--  tensorflow/contrib/__init__.py | 8
-rw-r--r--  tensorflow/contrib/bigtable/kernels/bigtable_kernels.cc | 6
-rw-r--r--  tensorflow/contrib/bigtable/kernels/bigtable_lib.h | 4
-rw-r--r--  tensorflow/contrib/bigtable/kernels/bigtable_lookup_dataset_op.cc | 2
-rw-r--r--  tensorflow/contrib/bigtable/kernels/bigtable_prefix_key_dataset_op.cc | 2
-rw-r--r--  tensorflow/contrib/bigtable/kernels/bigtable_range_key_dataset_op.cc | 2
-rw-r--r--  tensorflow/contrib/bigtable/kernels/bigtable_sample_key_pairs_dataset_op.cc | 2
-rw-r--r--  tensorflow/contrib/bigtable/kernels/bigtable_sample_keys_dataset_op.cc | 2
-rw-r--r--  tensorflow/contrib/bigtable/kernels/bigtable_scan_dataset_op.cc | 2
-rw-r--r--  tensorflow/contrib/data/kernels/assert_next_dataset_op.cc | 2
-rw-r--r--  tensorflow/contrib/data/kernels/csv_dataset_op.cc | 2
-rw-r--r--  tensorflow/contrib/data/kernels/directed_interleave_dataset_op.cc | 4
-rw-r--r--  tensorflow/contrib/data/kernels/identity_indexed_dataset.cc | 2
-rw-r--r--  tensorflow/contrib/data/kernels/ignore_errors_dataset_op.cc | 4
-rw-r--r--  tensorflow/contrib/data/kernels/indexed_dataset.cc | 5
-rw-r--r--  tensorflow/contrib/data/kernels/indexed_dataset.h | 2
-rw-r--r--  tensorflow/contrib/data/kernels/lmdb_dataset_op.cc | 2
-rw-r--r--  tensorflow/contrib/data/kernels/prefetching_kernels.cc | 4
-rw-r--r--  tensorflow/contrib/data/kernels/threadpool_dataset_op.cc | 2
-rw-r--r--  tensorflow/contrib/data/kernels/unique_dataset_op.cc | 4
-rw-r--r--  tensorflow/contrib/data/python/kernel_tests/map_defun_op_test.py | 16
-rw-r--r--  tensorflow/contrib/data/python/ops/batching.py | 6
-rw-r--r--  tensorflow/contrib/distribute/python/examples/keras_mnist.py | 4
-rw-r--r--  tensorflow/contrib/hadoop/kernels/hadoop_dataset_ops.cc | 4
-rw-r--r--  tensorflow/contrib/lite/builtin_op_data.h | 10
-rw-r--r--  tensorflow/contrib/lite/context.h | 40
-rw-r--r--  tensorflow/contrib/lite/delegates/eager/delegate_test.cc | 20
-rw-r--r--  tensorflow/contrib/lite/delegates/eager/kernel.cc | 2
-rw-r--r--  tensorflow/contrib/lite/delegates/eager/test_util.cc | 43
-rw-r--r--  tensorflow/contrib/lite/delegates/eager/test_util.h | 28
-rw-r--r--  tensorflow/contrib/lite/delegates/eager/util.cc | 36
-rw-r--r--  tensorflow/contrib/lite/delegates/eager/util.h | 13
-rw-r--r--  tensorflow/contrib/lite/delegates/eager/util_test.cc | 38
-rw-r--r--  tensorflow/contrib/lite/g3doc/models.md | 68
-rw-r--r--  tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm.cc | 348
-rw-r--r--  tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm_test.cc | 70
-rw-r--r--  tensorflow/contrib/lite/kernels/eigen_support.h | 2
-rw-r--r--  tensorflow/contrib/lite/kernels/internal/kernel_utils.cc | 39
-rw-r--r--  tensorflow/contrib/lite/kernels/internal/kernel_utils.h | 17
-rw-r--r--  tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h | 102
-rw-r--r--  tensorflow/contrib/lite/kernels/internal/types.h | 5
-rw-r--r--  tensorflow/contrib/lite/nnapi_delegate.cc | 65
-rw-r--r--  tensorflow/contrib/lite/nnapi_delegate_disabled.cc | 8
-rw-r--r--  tensorflow/contrib/lite/python/BUILD | 2
-rw-r--r--  tensorflow/contrib/lite/python/lite.py | 15
-rw-r--r--  tensorflow/contrib/lite/python/lite_test.py | 19
-rw-r--r--  tensorflow/contrib/lite/testing/BUILD | 1
-rw-r--r--  tensorflow/contrib/lite/toco/g3doc/toco_landscape.svg | 2
-rw-r--r--  tensorflow/contrib/lite/tools/optimize/quantize_weights.cc | 19
-rw-r--r--  tensorflow/contrib/tpu/ops/cross_replica_ops.cc | 89
-rw-r--r--  tensorflow/contrib/tpu/proto/optimization_parameters.proto | 8
-rw-r--r--  tensorflow/contrib/tpu/python/ops/tpu_ops.py | 64
-rw-r--r--  tensorflow/contrib/tpu/python/tpu/keras_support.py | 2
-rw-r--r--  tensorflow/contrib/tpu/python/tpu/keras_tpu_variables.py | 2
-rw-r--r--  tensorflow/core/framework/dataset.cc | 2
-rw-r--r--  tensorflow/core/framework/dataset.h | 30
-rw-r--r--  tensorflow/core/framework/dataset_stateful_op_whitelist.h | 11
-rw-r--r--  tensorflow/core/framework/stats_aggregator.h | 3
-rw-r--r--  tensorflow/core/grappler/graph_analyzer/graph_analyzer.h | 2
-rw-r--r--  tensorflow/core/kernels/data/batch_dataset_op.cc | 4
-rw-r--r--  tensorflow/core/kernels/data/cache_dataset_ops.cc | 4
-rw-r--r--  tensorflow/core/kernels/data/captured_function.cc | 2
-rw-r--r--  tensorflow/core/kernels/data/captured_function.h | 12
-rw-r--r--  tensorflow/core/kernels/data/concatenate_dataset_op.cc | 4
-rw-r--r--  tensorflow/core/kernels/data/dataset_ops.cc | 2
-rw-r--r--  tensorflow/core/kernels/data/dataset_utils.cc | 6
-rw-r--r--  tensorflow/core/kernels/data/dataset_utils.h | 6
-rw-r--r--  tensorflow/core/kernels/data/dense_to_sparse_batch_dataset_op.cc | 4
-rw-r--r--  tensorflow/core/kernels/data/filter_by_component_dataset_op.cc | 4
-rw-r--r--  tensorflow/core/kernels/data/filter_dataset_op.cc | 4
-rw-r--r--  tensorflow/core/kernels/data/flat_map_dataset_op.cc | 6
-rw-r--r--  tensorflow/core/kernels/data/generator_dataset_op.cc | 4
-rw-r--r--  tensorflow/core/kernels/data/generator_dataset_op.h | 2
-rw-r--r--  tensorflow/core/kernels/data/group_by_reducer_dataset_op.cc | 2
-rw-r--r--  tensorflow/core/kernels/data/group_by_window_dataset_op.cc | 2
-rw-r--r--  tensorflow/core/kernels/data/interleave_dataset_op.cc | 8
-rw-r--r--  tensorflow/core/kernels/data/iterator_ops.cc | 19
-rw-r--r--  tensorflow/core/kernels/data/iterator_ops.h | 2
-rw-r--r--  tensorflow/core/kernels/data/map_and_batch_dataset_op.cc | 4
-rw-r--r--  tensorflow/core/kernels/data/map_dataset_op.cc | 4
-rw-r--r--  tensorflow/core/kernels/data/map_defun_op.cc | 10
-rw-r--r--  tensorflow/core/kernels/data/optimize_dataset_op.cc | 2
-rw-r--r--  tensorflow/core/kernels/data/optional_ops.cc | 2
-rw-r--r--  tensorflow/core/kernels/data/optional_ops.h | 2
-rw-r--r--  tensorflow/core/kernels/data/padded_batch_dataset_op.cc | 4
-rw-r--r--  tensorflow/core/kernels/data/parallel_interleave_dataset_op.cc | 8
-rw-r--r--  tensorflow/core/kernels/data/parallel_map_dataset_op.cc | 4
-rw-r--r--  tensorflow/core/kernels/data/parallel_map_iterator.cc | 2
-rw-r--r--  tensorflow/core/kernels/data/parallel_map_iterator.h | 2
-rw-r--r--  tensorflow/core/kernels/data/parse_example_dataset_op.cc | 4
-rw-r--r--  tensorflow/core/kernels/data/prefetch_autotuner.cc | 2
-rw-r--r--  tensorflow/core/kernels/data/prefetch_autotuner.h | 2
-rw-r--r--  tensorflow/core/kernels/data/prefetch_autotuner_test.cc | 2
-rw-r--r--  tensorflow/core/kernels/data/prefetch_dataset_op.cc | 5
-rw-r--r--  tensorflow/core/kernels/data/prefetch_dataset_op.h | 2
-rw-r--r--  tensorflow/core/kernels/data/random_dataset_op.cc | 4
-rw-r--r--  tensorflow/core/kernels/data/range_dataset_op.cc | 4
-rw-r--r--  tensorflow/core/kernels/data/reader_dataset_ops.cc | 4
-rw-r--r--  tensorflow/core/kernels/data/repeat_dataset_op.cc | 4
-rw-r--r--  tensorflow/core/kernels/data/scan_dataset_op.cc | 4
-rw-r--r--  tensorflow/core/kernels/data/shuffle_dataset_op.cc | 4
-rw-r--r--  tensorflow/core/kernels/data/single_threaded_executor.cc | 2
-rw-r--r--  tensorflow/core/kernels/data/single_threaded_executor.h | 2
-rw-r--r--  tensorflow/core/kernels/data/single_threaded_executor_test.cc | 2
-rw-r--r--  tensorflow/core/kernels/data/skip_dataset_op.cc | 4
-rw-r--r--  tensorflow/core/kernels/data/slide_dataset_op.cc | 4
-rw-r--r--  tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc | 4
-rw-r--r--  tensorflow/core/kernels/data/sql/driver_manager.cc | 4
-rw-r--r--  tensorflow/core/kernels/data/sql/driver_manager.h | 4
-rw-r--r--  tensorflow/core/kernels/data/sql/query_connection.h | 3
-rw-r--r--  tensorflow/core/kernels/data/sql/sqlite_query_connection.cc | 4
-rw-r--r--  tensorflow/core/kernels/data/sql/sqlite_query_connection.h | 4
-rw-r--r--  tensorflow/core/kernels/data/sql_dataset_ops.cc | 5
-rw-r--r--  tensorflow/core/kernels/data/stats_aggregator_dataset_op.cc | 2
-rw-r--r--  tensorflow/core/kernels/data/stats_aggregator_ops.cc | 2
-rw-r--r--  tensorflow/core/kernels/data/stats_dataset_ops.cc | 2
-rw-r--r--  tensorflow/core/kernels/data/take_dataset_op.cc | 4
-rw-r--r--  tensorflow/core/kernels/data/tensor_dataset_op.cc | 4
-rw-r--r--  tensorflow/core/kernels/data/tensor_queue_dataset_op.cc | 4
-rw-r--r--  tensorflow/core/kernels/data/tensor_slice_dataset_op.cc | 4
-rw-r--r--  tensorflow/core/kernels/data/unbatch_dataset_op.cc | 4
-rw-r--r--  tensorflow/core/kernels/data/window_dataset.cc | 2
-rw-r--r--  tensorflow/core/kernels/data/window_dataset.h | 2
-rw-r--r--  tensorflow/core/kernels/data/window_dataset_op.cc | 4
-rw-r--r--  tensorflow/core/kernels/data/writer_ops.cc | 3
-rw-r--r--  tensorflow/core/kernels/data/zip_dataset_op.cc | 4
-rw-r--r--  tensorflow/core/kernels/eigen_backward_cuboid_convolutions.h | 201
-rw-r--r--  tensorflow/core/kernels/eigen_backward_spatial_convolutions.h | 7
-rw-r--r--  tensorflow/core/kernels/eigen_benchmark_cpu_test.cc | 6
-rw-r--r--  tensorflow/core/kernels/gather_nd_op_cpu_impl.h | 4
-rw-r--r--  tensorflow/core/util/ctc/ctc_beam_entry.h | 2
-rw-r--r--  tensorflow/core/util/ctc/ctc_beam_scorer.h | 2
-rw-r--r--  tensorflow/core/util/ctc/ctc_beam_search.h | 1
-rw-r--r--  tensorflow/core/util/ctc/ctc_decoder.h | 2
-rw-r--r--  tensorflow/core/util/ctc/ctc_loss_util.h | 2
-rw-r--r--  tensorflow/python/BUILD | 13
-rw-r--r--  tensorflow/python/__init__.py | 7
-rw-r--r--  tensorflow/python/client/session_test.py | 2
-rw-r--r--  tensorflow/python/eager/BUILD | 2
-rw-r--r--  tensorflow/python/eager/function.py | 214
-rw-r--r--  tensorflow/python/eager/tensor_test.py | 1
-rw-r--r--  tensorflow/python/estimator/BUILD | 5
-rw-r--r--  tensorflow/python/estimator/estimator.py | 2
-rw-r--r--  tensorflow/python/estimator/run_config.py | 7
-rw-r--r--  tensorflow/python/framework/ops.py | 6
-rw-r--r--  tensorflow/python/framework/test_util.py | 156
-rw-r--r--  tensorflow/python/framework/test_util_test.py | 3
-rw-r--r--  tensorflow/python/keras/engine/base_layer.py | 2
-rw-r--r--  tensorflow/python/keras/engine/network.py | 2
-rw-r--r--  tensorflow/python/keras/engine/training_distributed.py | 237
-rw-r--r--  tensorflow/python/kernel_tests/py_func_test.py | 12
-rw-r--r--  tensorflow/python/lib/core/py_seq_tensor.cc | 25
-rw-r--r--  tensorflow/python/ops/gradients.py | 2
-rw-r--r--  tensorflow/python/ops/gradients_impl.py | 45
-rw-r--r--  tensorflow/python/ops/gradients_test.py | 31
-rw-r--r--  tensorflow/python/ops/io_ops.py | 37
-rw-r--r--  tensorflow/python/ops/nn_ops.py | 2
-rw-r--r--  tensorflow/python/ops/parallel_for/pfor.py | 4
-rw-r--r--  tensorflow/python/ops/script_ops.py | 6
-rw-r--r--  tensorflow/python/tools/component_api_helper.py | 5
-rw-r--r--  tensorflow/python/tools/print_selective_registration_header_test.py | 12
-rw-r--r--  tensorflow/python/tools/selective_registration_header_lib.py | 17
-rw-r--r--  tensorflow/python/training/checkpoint_ops.py | 3
-rw-r--r--  tensorflow/python/training/input.py | 80
-rw-r--r--  tensorflow/python/training/learning_rate_decay.py | 432
-rw-r--r--  tensorflow/python/training/learning_rate_decay_v2.py | 898
-rw-r--r--  tensorflow/python/training/learning_rate_decay_v2_test.py | 497
-rw-r--r--  tensorflow/python/training/warm_starting_util.py | 100
-rw-r--r--  tensorflow/python/training/warm_starting_util_test.py | 140
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.estimator.-vocab-info.pbtxt | 4
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.train.-vocab-info.pbtxt | 4
-rw-r--r--  tensorflow/tools/api/golden/v2/tensorflow.-fixed-length-record-reader.pbtxt | 46
-rw-r--r--  tensorflow/tools/api/golden/v2/tensorflow.-identity-reader.pbtxt | 46
-rw-r--r--  tensorflow/tools/api/golden/v2/tensorflow.-l-m-d-b-reader.pbtxt | 46
-rw-r--r--  tensorflow/tools/api/golden/v2/tensorflow.-reader-base.pbtxt | 45
-rw-r--r--  tensorflow/tools/api/golden/v2/tensorflow.-t-f-record-reader.pbtxt | 46
-rw-r--r--  tensorflow/tools/api/golden/v2/tensorflow.-text-line-reader.pbtxt | 46
-rw-r--r--  tensorflow/tools/api/golden/v2/tensorflow.-whole-file-reader.pbtxt | 46
-rw-r--r--  tensorflow/tools/api/golden/v2/tensorflow.estimator.-vocab-info.pbtxt | 4
-rw-r--r--  tensorflow/tools/api/golden/v2/tensorflow.pbtxt | 28
-rw-r--r--  tensorflow/tools/api/golden/v2/tensorflow.train.-vocab-info.pbtxt | 4
-rw-r--r--  tensorflow/tools/api/golden/v2/tensorflow.train.pbtxt | 52
-rw-r--r--  tensorflow/tools/compatibility/renames_v2.py | 1
-rw-r--r--  tensorflow/tools/compatibility/tf_upgrade_v2.py | 24
-rw-r--r--  tensorflow/tools/compatibility/tf_upgrade_v2_test.py | 13
-rwxr-xr-x  tensorflow/workspace.bzl | 8
-rw-r--r--  third_party/jpeg/jpeg.BUILD | 324
332 files changed, 5918 insertions, 3309 deletions
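
Most of the C++ changes in the diff below swap TensorFlow's internal string utilities (strings::StrCat, strings::StrAppend, strings::Printf, StringPiece) for their Abseil counterparts (absl::StrCat, absl::StrAppend, absl::StrFormat, absl::string_view). A minimal sketch of that pattern follows; the function and variable names are illustrative and not taken from the commit.

// Illustrative sketch only; names are not from the diff.
#include <string>

#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"

// Before:  string name = strings::StrCat("arg", index);
//          strings::StrAppend(&name, "_retval");
//          string ratio = strings::Printf("%d / %d", num, denom);
//          Status Validate(StringPiece ident);
// After, using the Abseil equivalents at the same call sites:
std::string MakeRetvalName(int index) {
  std::string name = absl::StrCat("arg", index);  // formats numbers directly
  absl::StrAppend(&name, "_retval");              // appends in place
  return name;
}

std::string Ratio(int num, int denom) {
  return absl::StrFormat("%d / %d", num, denom);  // type-checked printf-style formatting
}

// absl::string_view replaces StringPiece in signatures; std::string and
// string literals convert implicitly, so existing callers compile unchanged.
bool IsEmptyIdent(absl::string_view ident) { return ident.empty(); }
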
diff --git a/tensorflow/compiler/aot/codegen.cc b/tensorflow/compiler/aot/codegen.cc
index 2b1ce34b37..b17bc658fa 100644
--- a/tensorflow/compiler/aot/codegen.cc
+++ b/tensorflow/compiler/aot/codegen.cc
@@ -20,6 +20,7 @@ limitations under the License.
#include <vector>
#include "absl/memory/memory.h"
+#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "absl/types/span.h"
@@ -31,7 +32,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/core/errors.h"
-#include "tensorflow/core/lib/strings/strcat.h"
namespace tensorflow {
namespace tfcompile {
@@ -135,12 +135,12 @@ Status AddRewritesForShape(int i, const xla::Shape& shape,
indices = "[0]";
} else {
for (int dim = 0; dim < shape.dimensions_size(); ++dim) {
- dim_vars.push_back(strings::StrCat("size_t dim", dim));
- dim_sizes += strings::StrCat("[", shape.dimensions(dim), "]");
- indices += strings::StrCat("[dim", dim, "]");
+ dim_vars.push_back(absl::StrCat("size_t dim", dim));
+ dim_sizes += absl::StrCat("[", shape.dimensions(dim), "]");
+ indices += absl::StrCat("[dim", dim, "]");
}
}
- rewrites->push_back({"{{I}}", strings::StrCat(i)});
+ rewrites->push_back({"{{I}}", absl::StrCat(i)});
rewrites->push_back({"{{TYPE}}", type});
rewrites->push_back({"{{DIM_VARS}}", absl::StrJoin(dim_vars, ", ")});
rewrites->push_back({"{{DIM_SIZES}}", dim_sizes});
@@ -194,7 +194,7 @@ Status GenArgMethods(const tf2xla::Config& config, const xla::ProgramShape& ps,
arg_data({{I}}))){{INDICES}};
}
)";
- *methods += RewriteWithName(strings::StrCat(i), code, rewrites);
+ *methods += RewriteWithName(absl::StrCat(i), code, rewrites);
if (!config.feed(i).name().empty()) {
*methods += RewriteWithName("_" + config.feed(i).name(), code, rewrites);
}
@@ -235,7 +235,7 @@ Status GenResultMethods(const tf2xla::Config& config,
result_data({{I}}))){{INDICES}};
}
)";
- *methods += RewriteWithName(strings::StrCat(i), code, rewrites);
+ *methods += RewriteWithName(absl::StrCat(i), code, rewrites);
if (!config.fetch(i).name().empty()) {
*methods += RewriteWithName("_" + config.fetch(i).name(), code, rewrites);
}
@@ -304,8 +304,8 @@ std::vector<string> BufferInfosToCppExpression(
string encoded_second_as_str =
encoded.second == ~0ULL
? "~0ULL"
- : strings::StrCat(encoded.second, "ULL");
- return strings::StrCat(
+ : absl::StrCat(encoded.second, "ULL");
+ return absl::StrCat(
"::tensorflow::cpu_function_runtime::BufferInfo({",
encoded.first, "ULL, ", encoded_second_as_str, "})");
});
@@ -352,13 +352,13 @@ Status GenerateHeader(const CodegenOpts& opts, const tf2xla::Config& config,
// Create rewrite strings for namespace start and end.
string ns_start;
for (const string& n : opts.namespaces) {
- ns_start += strings::StrCat("namespace ", n, " {\n");
+ ns_start += absl::StrCat("namespace ", n, " {\n");
}
ns_start += "\n";
string ns_end("\n");
for (int i = opts.namespaces.size() - 1; i >= 0; --i) {
const string& n = opts.namespaces[i];
- ns_end += strings::StrCat("} // end namespace ", n, "\n");
+ ns_end += absl::StrCat("} // end namespace ", n, "\n");
}
// Generate metadata.
@@ -568,10 +568,10 @@ class {{CLASS}} : public tensorflow::XlaCompiledCpuFunction {
)";
// The replacement strategy is naive, but good enough for our purposes.
const std::vector<std::pair<string, string>> rewrites = {
- {"{{ARG_BYTES_ALIGNED}}", strings::StrCat(arg_bytes_aligned)},
- {"{{ARG_BYTES_TOTAL}}", strings::StrCat(arg_bytes_total)},
+ {"{{ARG_BYTES_ALIGNED}}", absl::StrCat(arg_bytes_aligned)},
+ {"{{ARG_BYTES_TOTAL}}", absl::StrCat(arg_bytes_total)},
{"{{ARG_NAMES_CODE}}", arg_names_code},
- {"{{ARG_NUM}}", strings::StrCat(arg_index_table.size())},
+ {"{{ARG_NUM}}", absl::StrCat(arg_index_table.size())},
{"{{ARG_INDEX_TABLE}}", absl::StrJoin(arg_index_table, ", ")},
{"{{ASSIGN_PROFILE_COUNTERS_SIZE}}", assign_profile_counters_size},
{"{{CLASS}}", opts.class_name},
@@ -590,11 +590,11 @@ class {{CLASS}} : public tensorflow::XlaCompiledCpuFunction {
{"{{PROGRAM_SHAPE}}", xla::ShapeUtil::HumanString(ps)},
{"{{PROGRAM_SHAPE_SHIM_EXPRESSION}}",
metadata_result.program_shape_access_shim},
- {"{{RESULT_INDEX}}", strings::StrCat(result_index)},
+ {"{{RESULT_INDEX}}", absl::StrCat(result_index)},
{"{{RESULT_NAMES_CODE}}", result_names_code},
- {"{{TEMP_BYTES_ALIGNED}}", strings::StrCat(temp_bytes_aligned)},
- {"{{TEMP_BYTES_TOTAL}}", strings::StrCat(temp_bytes_total)},
- {"{{NUM_BUFFERS}}", strings::StrCat(buffer_infos.size())},
+ {"{{TEMP_BYTES_ALIGNED}}", absl::StrCat(temp_bytes_aligned)},
+ {"{{TEMP_BYTES_TOTAL}}", absl::StrCat(temp_bytes_total)},
+ {"{{NUM_BUFFERS}}", absl::StrCat(buffer_infos.size())},
{"{{BUFFER_INFOS_AS_STRING}}",
absl::StrJoin(buffer_infos_as_strings, ",\n")}};
absl::StrReplaceAll(rewrites, header);
@@ -602,13 +602,13 @@ class {{CLASS}} : public tensorflow::XlaCompiledCpuFunction {
}
static string CreateUniqueIdentifier(const CodegenOpts& opts,
- StringPiece suffix) {
+ absl::string_view suffix) {
string result = "__tfcompile";
for (const string& n : opts.namespaces) {
- strings::StrAppend(&result, "_", n);
+ absl::StrAppend(&result, "_", n);
}
- strings::StrAppend(&result, "_", opts.class_name, "_", suffix);
+ absl::StrAppend(&result, "_", opts.class_name, "_", suffix);
return result;
}
@@ -678,7 +678,7 @@ Status ParseCppClass(const string& cpp_class, string* class_name,
return Status::OK();
}
-Status ValidateCppIdent(StringPiece ident, StringPiece msg) {
+Status ValidateCppIdent(absl::string_view ident, absl::string_view msg) {
if (ident.empty()) {
return errors::InvalidArgument("empty identifier: ", msg);
}
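
For context on the codegen.cc hunks above: the generated header is built by accumulating {{PLACEHOLDER}} rewrites and applying them with absl::StrReplaceAll, now paired with absl::StrCat for the numeric values. A small standalone sketch of that substitution pattern, with illustrative placeholder names and template text that are not from the commit:

#include <string>
#include <utility>
#include <vector>

#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"

std::string Example() {
  // Fixed replacements, returning a new string.
  std::string method = absl::StrReplaceAll(
      "void set_arg{{I}}_data(void* data);", {{"{{I}}", absl::StrCat(3)}});
  // method == "void set_arg3_data(void* data);"

  // A table of rewrites applied in place, as GenerateHeader does with its
  // placeholder table.
  std::vector<std::pair<std::string, std::string>> rewrites = {
      {"{{CLASS}}", "MyGraph"}, {"{{ARG_NUM}}", absl::StrCat(2)}};
  std::string header =
      "class {{CLASS}} { static constexpr int kNumArgs = {{ARG_NUM}}; };";
  absl::StrReplaceAll(rewrites, &header);  // substitutes in place
  return header;
}
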
diff --git a/tensorflow/compiler/aot/codegen.h b/tensorflow/compiler/aot/codegen.h
index 83f2d3ee11..90410c46a8 100644
--- a/tensorflow/compiler/aot/codegen.h
+++ b/tensorflow/compiler/aot/codegen.h
@@ -19,9 +19,9 @@ limitations under the License.
#include <string>
#include <vector>
+#include "absl/strings/string_view.h"
#include "tensorflow/compiler/aot/compile.h"
#include "tensorflow/compiler/tf2xla/tf2xla.pb.h"
-#include "tensorflow/core/lib/core/stringpiece.h"
namespace tensorflow {
namespace tfcompile {
@@ -96,7 +96,7 @@ Status ParseCppClass(const string& cpp_class, string* class_name,
// ValidateCppIdent returns OK iff ident is a valid C++ identifier. The msg is
// appended to error messages.
-Status ValidateCppIdent(StringPiece ident, StringPiece msg);
+Status ValidateCppIdent(absl::string_view ident, absl::string_view msg);
} // namespace tfcompile
} // namespace tensorflow
diff --git a/tensorflow/compiler/aot/codegen_test.cc b/tensorflow/compiler/aot/codegen_test.cc
index e3a53edb73..bb288d2300 100644
--- a/tensorflow/compiler/aot/codegen_test.cc
+++ b/tensorflow/compiler/aot/codegen_test.cc
@@ -19,11 +19,11 @@ limitations under the License.
#include <vector>
#include "absl/strings/match.h"
+#include "absl/strings/string_view.h"
#include "llvm/Support/TargetSelect.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
-#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
diff --git a/tensorflow/compiler/aot/embedded_protocol_buffers.cc b/tensorflow/compiler/aot/embedded_protocol_buffers.cc
index f1e8e5c084..3c32d533f6 100644
--- a/tensorflow/compiler/aot/embedded_protocol_buffers.cc
+++ b/tensorflow/compiler/aot/embedded_protocol_buffers.cc
@@ -38,11 +38,11 @@ using xla::llvm_ir::AsStringRef;
static void AddEmbeddedProtocolBufferToLlvmModule(
llvm::Module* module, const ::tensorflow::protobuf::MessageLite& proto,
- StringPiece unique_identifier, string* protobuf_array_symbol_name,
+ absl::string_view unique_identifier, string* protobuf_array_symbol_name,
int64* protobuf_array_size) {
string protobuf_array_contents = proto.SerializeAsString();
*protobuf_array_symbol_name =
- strings::StrCat(unique_identifier, "_protobuf_array_contents");
+ absl::StrCat(unique_identifier, "_protobuf_array_contents");
*protobuf_array_size = protobuf_array_contents.size();
llvm::Constant* protobuf_array_initializer =
@@ -55,9 +55,9 @@ static void AddEmbeddedProtocolBufferToLlvmModule(
protobuf_array_initializer, AsStringRef(*protobuf_array_symbol_name));
}
-static string CreateCPPShimExpression(StringPiece qualified_cpp_protobuf_name,
- StringPiece protobuf_array_symbol_name,
- int64 protobuf_array_size) {
+static string CreateCPPShimExpression(
+ absl::string_view qualified_cpp_protobuf_name,
+ absl::string_view protobuf_array_symbol_name, int64 protobuf_array_size) {
string code =
"[]() {\n"
" {{PROTOBUF_NAME}}* proto = new {{PROTOBUF_NAME}};\n"
@@ -68,9 +68,9 @@ static string CreateCPPShimExpression(StringPiece qualified_cpp_protobuf_name,
return absl::StrReplaceAll(
code,
{
- {"{{ARRAY_SYMBOL}}", strings::StrCat(protobuf_array_symbol_name)},
- {"{{ARRAY_SIZE}}", strings::StrCat(protobuf_array_size)},
- {"{{PROTOBUF_NAME}}", strings::StrCat(qualified_cpp_protobuf_name)},
+ {"{{ARRAY_SYMBOL}}", absl::StrCat(protobuf_array_symbol_name)},
+ {"{{ARRAY_SIZE}}", absl::StrCat(protobuf_array_size)},
+ {"{{PROTOBUF_NAME}}", absl::StrCat(qualified_cpp_protobuf_name)},
});
}
@@ -93,7 +93,7 @@ static StatusOr<string> CodegenModule(llvm::TargetMachine* target_machine,
}
static StatusOr<std::unique_ptr<llvm::TargetMachine>>
-GetTargetMachineFromTriple(StringPiece target_triple) {
+GetTargetMachineFromTriple(absl::string_view target_triple) {
std::string error;
std::string normalized_triple =
llvm::Triple::normalize(AsStringRef(absl::string_view(target_triple)));
@@ -110,7 +110,7 @@ GetTargetMachineFromTriple(StringPiece target_triple) {
}
StatusOr<EmbeddedProtocolBuffers> CreateEmbeddedProtocolBuffers(
- StringPiece target_triple,
+ absl::string_view target_triple,
absl::Span<const ProtobufToEmbed> protobufs_to_embed) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<llvm::TargetMachine> target_machine,
GetTargetMachineFromTriple(target_triple));
@@ -135,8 +135,8 @@ StatusOr<EmbeddedProtocolBuffers> CreateEmbeddedProtocolBuffers(
protobuf_to_embed.qualified_cpp_protobuf_name,
protobuf_array_symbol_name, protobuf_array_size);
- cpp_variable_decl = strings::StrCat("extern \"C\" char ",
- protobuf_array_symbol_name, "[];");
+ cpp_variable_decl =
+ absl::StrCat("extern \"C\" char ", protobuf_array_symbol_name, "[];");
} else {
cpp_shim = "nullptr";
}
diff --git a/tensorflow/compiler/aot/embedded_protocol_buffers.h b/tensorflow/compiler/aot/embedded_protocol_buffers.h
index 4f940c0197..cf5c04ac4b 100644
--- a/tensorflow/compiler/aot/embedded_protocol_buffers.h
+++ b/tensorflow/compiler/aot/embedded_protocol_buffers.h
@@ -83,7 +83,7 @@ struct ProtobufToEmbed {
// is stored in the object_file_data field in the returned
// EmbeddedProtocolBuffers instance.
StatusOr<EmbeddedProtocolBuffers> CreateEmbeddedProtocolBuffers(
- StringPiece target_triple,
+ absl::string_view target_triple,
absl::Span<const ProtobufToEmbed> protobufs_to_embed);
} // namespace tfcompile
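
For context on the embedded-protobuf changes: a message is serialized into a char array that is linked into the object file, and the generated shim re-parses it at runtime. The following is only a rough sketch of what consuming code looks like; the symbol name is hypothetical (the real one comes from CreateUniqueIdentifier), the tf2xla Config proto is used purely as an example, and the actual shim text is produced by CreateCPPShimExpression.

#include "tensorflow/compiler/tf2xla/tf2xla.pb.h"

// Hypothetical symbol name for the embedded serialized proto.
extern "C" char __tfcompile_example_config_protobuf_array_contents[];

const tensorflow::tf2xla::Config* LoadEmbeddedConfig(int array_size) {
  auto* proto = new tensorflow::tf2xla::Config;
  // The array holds proto.SerializeAsString(), so ParseFromArray restores it.
  proto->ParseFromArray(__tfcompile_example_config_protobuf_array_contents,
                        array_size);
  return proto;
}
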
diff --git a/tensorflow/compiler/aot/tests/BUILD b/tensorflow/compiler/aot/tests/BUILD
index 723e9bec8a..8d94f5495c 100644
--- a/tensorflow/compiler/aot/tests/BUILD
+++ b/tensorflow/compiler/aot/tests/BUILD
@@ -67,7 +67,12 @@ genrule(
"test_graph_tfmatmulandadd.pb",
"test_graph_tfsplits.pb",
],
- cmd = "$(location :make_test_graphs) --out_dir $(@D)",
+ # Set CUDA_VISIBLE_DEVICES='' to prevent the code we launch from using any
+ # GPUs which might be present. This is important because builds may run
+ # concurrently with tests, and tests need to be able to assume that they
+ # have control of the full GPU.
+ cmd = "CUDA_VISIBLE_DEVICES='' " +
+ "$(location :make_test_graphs) --out_dir $(@D)",
tags = ["manual"],
tools = [":make_test_graphs"],
)
diff --git a/tensorflow/compiler/aot/tfcompile.bzl b/tensorflow/compiler/aot/tfcompile.bzl
index 326f73b975..792b7fe14a 100644
--- a/tensorflow/compiler/aot/tfcompile.bzl
+++ b/tensorflow/compiler/aot/tfcompile.bzl
@@ -105,12 +105,18 @@ def tf_library(
freeze_file = freeze_name + ".pb"
# First run tfcompile to generate the list of out_nodes.
+ #
+ # Here and below, we set CUDA_VISIBLE_DEVICES='' to prevent the code we
+ # launch from using any GPUs which might be present. This is important
+ # because builds may run concurrently with tests, and tests need to be
+ # able to assume that they have control of the full GPU.
out_nodes_file = "out_nodes_" + freeze_name
native.genrule(
name = ("gen_" + out_nodes_file),
srcs = [config],
outs = [out_nodes_file],
- cmd = ("$(location " + tfcompile_tool + ")" +
+ cmd = ("CUDA_VISIBLE_DEVICES='' " +
+ "$(location " + tfcompile_tool + ")" +
" --config=$(location " + config + ")" +
" --dump_fetch_nodes > $@"),
tools = [tfcompile_tool],
@@ -142,9 +148,12 @@ def tf_library(
out_nodes_file,
] + freeze_saver_srcs,
outs = [freeze_file],
- cmd = ("$(location " +
- "//tensorflow/python/tools:freeze_graph)" +
- freeze_args),
+ cmd = (
+ "CUDA_VISIBLE_DEVICES='' " +
+ "$(location " +
+ "//tensorflow/python/tools:freeze_graph)" +
+ freeze_args
+ ),
tools = ["//tensorflow/python/tools:freeze_graph"],
tags = tags,
)
@@ -177,16 +186,19 @@ def tf_library(
metadata_object_file,
function_object_file,
],
- cmd = ("$(location " + tfcompile_tool + ")" +
- " --graph=$(location " + tfcompile_graph + ")" +
- " --config=$(location " + config + ")" +
- " --entry_point=" + ep +
- " --cpp_class=" + cpp_class +
- " --target_triple=" + target_llvm_triple() +
- " --out_header=$(@D)/" + header_file +
- " --out_metadata_object=$(@D)/" + metadata_object_file +
- " --out_function_object=$(@D)/" + function_object_file +
- " " + flags + " " + profiling_flag),
+ cmd = (
+ "CUDA_VISIBLE_DEVICES='' " +
+ "$(location " + tfcompile_tool + ")" +
+ " --graph=$(location " + tfcompile_graph + ")" +
+ " --config=$(location " + config + ")" +
+ " --entry_point=" + ep +
+ " --cpp_class=" + cpp_class +
+ " --target_triple=" + target_llvm_triple() +
+ " --out_header=$(@D)/" + header_file +
+ " --out_metadata_object=$(@D)/" + metadata_object_file +
+ " --out_function_object=$(@D)/" + function_object_file +
+ " " + flags + " " + profiling_flag
+ ),
tools = [tfcompile_tool],
visibility = visibility,
testonly = testonly,
@@ -216,14 +228,17 @@ def tf_library(
outs = [
session_module_pb,
],
- cmd = ("$(location " + tfcompile_tool + ")" +
- " --graph=$(location " + tfcompile_graph + ")" +
- " --config=$(location " + config + ")" +
- " --entry_point=" + ep +
- " --cpp_class=" + cpp_class +
- " --target_triple=" + target_llvm_triple() +
- " --out_session_module=$(@D)/" + session_module_pb +
- " " + flags),
+ cmd = (
+ "CUDA_VISIBLE_DEVICES='' " +
+ "$(location " + tfcompile_tool + ")" +
+ " --graph=$(location " + tfcompile_graph + ")" +
+ " --config=$(location " + config + ")" +
+ " --entry_point=" + ep +
+ " --cpp_class=" + cpp_class +
+ " --target_triple=" + target_llvm_triple() +
+ " --out_session_module=$(@D)/" + session_module_pb +
+ " " + flags
+ ),
tools = [tfcompile_tool],
visibility = visibility,
testonly = testonly,
diff --git a/tensorflow/compiler/aot/tfcompile_main.cc b/tensorflow/compiler/aot/tfcompile_main.cc
index f3c44e9dda..b95b063348 100644
--- a/tensorflow/compiler/aot/tfcompile_main.cc
+++ b/tensorflow/compiler/aot/tfcompile_main.cc
@@ -20,6 +20,7 @@ limitations under the License.
#include "absl/strings/match.h"
#include "absl/strings/str_join.h"
+#include "absl/strings/string_view.h"
#include "tensorflow/compiler/aot/codegen.h"
#include "tensorflow/compiler/aot/compile.h"
#include "tensorflow/compiler/aot/flags.h"
@@ -34,7 +35,6 @@ limitations under the License.
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/lib/core/errors.h"
-#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/init_main.h"
@@ -92,8 +92,9 @@ Status Main(const MainFlags& flags) {
// Write output files.
Env* env = Env::Default();
const std::vector<char>& obj = compile_result.aot->object_file_data();
- TF_RETURN_IF_ERROR(WriteStringToFile(env, flags.out_function_object,
- StringPiece(obj.data(), obj.size())));
+ TF_RETURN_IF_ERROR(
+ WriteStringToFile(env, flags.out_function_object,
+ absl::string_view(obj.data(), obj.size())));
CodegenOpts codegen_opts;
codegen_opts.gen_name_to_index = flags.gen_name_to_index;
codegen_opts.gen_program_shape = flags.gen_program_shape;
diff --git a/tensorflow/compiler/jit/BUILD b/tensorflow/compiler/jit/BUILD
index df81f3c23e..de7cd26d1d 100644
--- a/tensorflow/compiler/jit/BUILD
+++ b/tensorflow/compiler/jit/BUILD
@@ -410,6 +410,7 @@ cc_library(
"//tensorflow/core:graph",
"//tensorflow/core:protos_all_cc",
"//tensorflow/core/kernels:bounds_check",
+ "@com_google_absl//absl/strings",
"@com_google_absl//absl/types:optional",
],
)
@@ -566,6 +567,7 @@ cc_library(
"//tensorflow/core/grappler:grappler_item",
"//tensorflow/core/grappler/optimizers:custom_graph_optimizer",
"//tensorflow/core/grappler/optimizers:custom_graph_optimizer_registry",
+ "@com_google_absl//absl/strings",
],
)
diff --git a/tensorflow/compiler/jit/deadness_analysis.cc b/tensorflow/compiler/jit/deadness_analysis.cc
index 82aa03810b..9128b48da3 100644
--- a/tensorflow/compiler/jit/deadness_analysis.cc
+++ b/tensorflow/compiler/jit/deadness_analysis.cc
@@ -154,7 +154,7 @@ class AndPredicate : public Predicate {
std::back_inserter(operands_str),
[](Predicate* pred) { return pred->ToString(); });
- return strings::StrCat("(", absl::StrJoin(operands_str, " & "), ")");
+ return absl::StrCat("(", absl::StrJoin(operands_str, " & "), ")");
}
Kind kind() const override { return Kind::kAnd; }
@@ -185,7 +185,7 @@ class OrPredicate : public Predicate {
std::back_inserter(operands_str),
[](Predicate* pred) { return pred->ToString(); });
- return strings::StrCat("(", absl::StrJoin(operands_str, " | "), ")");
+ return absl::StrCat("(", absl::StrJoin(operands_str, " | "), ")");
}
Kind kind() const override { return Kind::kOr; }
@@ -206,7 +206,7 @@ class NotPredicate : public Predicate {
operands_({operand}) {}
string ToString() const override {
- return strings::StrCat("~", operand()->ToString());
+ return absl::StrCat("~", operand()->ToString());
}
Kind kind() const override { return Kind::kNot; }
@@ -240,8 +240,8 @@ class AndRecurrencePredicate : public Predicate {
Predicate* step() const { return operands_[1]; }
string ToString() const override {
- return strings::StrCat("{", start()->ToString(), ",&,", step()->ToString(),
- "}");
+ return absl::StrCat("{", start()->ToString(), ",&,", step()->ToString(),
+ "}");
}
Kind kind() const override { return Kind::kAndRecurrence; }
@@ -267,7 +267,7 @@ class SymbolPredicate : public Predicate {
must_be_true_(must_be_true) {}
string ToString() const override {
- return must_be_true() ? strings::StrCat("*", tensor_id_.ToString())
+ return must_be_true() ? absl::StrCat("*", tensor_id_.ToString())
: tensor_id_.ToString();
}
diff --git a/tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc b/tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc
index 2788102620..ae7a22f451 100644
--- a/tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc
+++ b/tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc
@@ -22,6 +22,7 @@ limitations under the License.
#include <unordered_map>
#include <vector>
+#include "absl/strings/str_cat.h"
#include "tensorflow/compiler/jit/graphcycles/graphcycles.h"
#include "tensorflow/compiler/jit/mark_for_compilation_pass.h"
#include "tensorflow/compiler/jit/shape_inference_helpers.h"
@@ -45,7 +46,6 @@ limitations under the License.
#include "tensorflow/core/lib/gtl/flatset.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/hash/hash.h"
-#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/device_name_utils.h"
@@ -755,7 +755,7 @@ Status Encapsulator::Subgraph::RecordArg(
if (inserted) {
NodeDef arg_def;
NodeDefBuilder builder(
- strings::StrCat(src_node->name(), "_", src_slot, "_arg"), kArgOp);
+ absl::StrCat(src_node->name(), "_", src_slot, "_arg"), kArgOp);
DataType dtype = edge->dst()->input_type(edge->dst_input());
builder.Attr("T", dtype);
builder.Attr("index", arg_index);
@@ -790,7 +790,7 @@ Status Encapsulator::Subgraph::RecordResult(
if (inserted) {
NodeDef ret_def;
NodeDefBuilder builder(
- strings::StrCat(src_node->name(), "_", src_slot, "_retval"), kRetValOp);
+ absl::StrCat(src_node->name(), "_", src_slot, "_retval"), kRetValOp);
DataType dtype = src_node->output_type(src_slot);
builder.Attr("T", dtype);
builder.Attr("index", ret_index);
@@ -950,16 +950,15 @@ Status Encapsulator::Subgraph::AddHostComputes(
}
NodeDef host_compute_def;
- NodeDefBuilder builder(strings::StrCat("outside_compilation_",
- oc_subgraph_name, "_host_compute"),
+ NodeDefBuilder builder(absl::StrCat("outside_compilation_",
+ oc_subgraph_name, "_host_compute"),
kHostComputeOp);
builder.Input(inputs);
builder.Attr("Tinputs", input_dtypes);
builder.Attr("Toutputs", output_dtypes);
builder.Attr("ancestors", host_compute_ancestors);
- builder.Attr("key",
- strings::StrCat("host_compute_channel_", subgraph_name, "_",
- oc_subgraph_name));
+ builder.Attr("key", absl::StrCat("host_compute_channel_", subgraph_name,
+ "_", oc_subgraph_name));
builder.Attr("_outside_compilation_subgraph", oc_subgraph_name);
Status s = builder.Finalize(&host_compute_def);
if (!s.ok()) return s;
@@ -1017,8 +1016,7 @@ Status Encapsulator::Subgraph::MakeSequencingNode(const string& subgraph_name,
Graph* graph_out) {
if (sequencer_ == nullptr) {
NodeDef seq_def;
- NodeDefBuilder builder(strings::StrCat(subgraph_name, "_sequencer"),
- "NoOp");
+ NodeDefBuilder builder(absl::StrCat(subgraph_name, "_sequencer"), "NoOp");
builder.Attr(kXlaHostTransferSequencerAttr, subgraph_name);
builder.Device(device_);
Status s = builder.Finalize(&seq_def);
@@ -1091,10 +1089,10 @@ Status Encapsulator::Subgraph::BuildFunctionDef(
if (VLOG_IS_ON(1)) {
VLOG(2) << "Build function def " << name;
- dump_graph::DumpGraphToFile(
- strings::StrCat("encapsulate_fdef_graph_", name), *graph_, library);
- dump_graph::DumpFunctionDefToFile(
- strings::StrCat("encapsulate_fdef_", name), fdef);
+ dump_graph::DumpGraphToFile(absl::StrCat("encapsulate_fdef_graph_", name),
+ *graph_, library);
+ dump_graph::DumpFunctionDefToFile(absl::StrCat("encapsulate_fdef_", name),
+ fdef);
}
if (!reuse_existing_functions || library->Find(name) == nullptr) {
@@ -1130,8 +1128,8 @@ Status Encapsulator::Subgraph::AddShapeInferenceInfo(
host_compute->AddAttr("shapes", shapes);
} else {
string inference_graph_name =
- strings::StrCat("_outside_compilation_shape_inference_", subgraph_name,
- "_", outside_compilation_subgraph_name);
+ absl::StrCat("_outside_compilation_shape_inference_", subgraph_name,
+ "_", outside_compilation_subgraph_name);
FunctionDef fdef;
TF_RETURN_IF_ERROR(
GraphToFunctionDef(*inference_graph, inference_graph_name, &fdef));
@@ -1155,10 +1153,10 @@ Status Encapsulator::Subgraph::ReplaceFunctionDef(
if (VLOG_IS_ON(1)) {
VLOG(2) << "Replace function def " << name;
dump_graph::DumpGraphToFile(
- strings::StrCat("replace_encapsulate_fdef_graph_", name), *graph_,
+ absl::StrCat("replace_encapsulate_fdef_graph_", name), *graph_,
library);
dump_graph::DumpFunctionDefToFile(
- strings::StrCat("replace_encapsulate_fdef_", name), fdef);
+ absl::StrCat("replace_encapsulate_fdef_", name), fdef);
}
TF_RETURN_IF_ERROR(library->ReplaceFunction(name, fdef));
@@ -1186,8 +1184,7 @@ Status Encapsulator::Subgraph::AddHostComputeKeyPlaceholder(
GraphDefBuilder::Options options(graph_out, /*status=*/nullptr);
NodeDef key_def;
NodeDefBuilder builder(
- strings::StrCat(call_node_def_.name(), "_key_placeholder"),
- "Placeholder");
+ absl::StrCat(call_node_def_.name(), "_key_placeholder"), "Placeholder");
builder.Attr("dtype", DT_STRING);
builder.Attr("shape", shape_proto);
builder.Attr("_host_compute_call_node", call_node_def_.name());
@@ -1221,16 +1218,16 @@ Status Encapsulator::Subgraph::AddRecvAtHostNode(
}
NodeDef recv_def;
- NodeDefBuilder builder(strings::StrCat("outside_compilation_", subgraph_name,
- "_", oc_subgraph_name, "_recv"),
+ NodeDefBuilder builder(absl::StrCat("outside_compilation_", subgraph_name,
+ "_", oc_subgraph_name, "_recv"),
kRecvAtHostOp);
builder.Device(device_);
builder.Attr("Toutputs", dtypes);
// The correct device_ordinal will be inserted during replication in a
// subsequent rewrite.
builder.Attr("device_ordinal", 0);
- builder.Attr("key", strings::StrCat("host_compute_channel_", subgraph_name,
- "_", oc_subgraph_name));
+ builder.Attr("key", absl::StrCat("host_compute_channel_", subgraph_name, "_",
+ oc_subgraph_name));
builder.Attr(group_attribute, subgraph_name);
builder.Attr(outside_compilation_attribute, oc_subgraph_name);
builder.Input(host_compute_key_placeholder_->name(), 0, DT_STRING);
@@ -1276,13 +1273,13 @@ Status Encapsulator::Subgraph::AddSendFromHostNode(
}
NodeDef send_def;
- NodeDefBuilder builder(strings::StrCat("outside_compilation_", subgraph_name,
- "_", oc_subgraph_name, "_send"),
+ NodeDefBuilder builder(absl::StrCat("outside_compilation_", subgraph_name,
+ "_", oc_subgraph_name, "_send"),
kSendFromHostOp);
builder.Device(device_);
builder.Attr("Tinputs", dtypes);
- builder.Attr("key", strings::StrCat("host_compute_channel_", subgraph_name,
- "_", oc_subgraph_name));
+ builder.Attr("key", absl::StrCat("host_compute_channel_", subgraph_name, "_",
+ oc_subgraph_name));
// The correct device_ordinal will be inserted during replication in a
// subsequent rewrite.
builder.Attr("device_ordinal", 0);
@@ -1516,7 +1513,7 @@ Status Encapsulator::SplitIntoSubgraphs(FunctionLibraryDefinition* library) {
// Dump subgraphs.
for (auto& entry : subgraphs_) {
dump_graph::DumpGraphToFile(
- strings::StrCat("encapsulate_subgraphs_subgraph_", entry.first),
+ absl::StrCat("encapsulate_subgraphs_subgraph_", entry.first),
*entry.second.GetGraph(), library);
}
}
@@ -2052,7 +2049,7 @@ struct PathDetails {
struct SubgraphAndClusterHash {
inline std::size_t operator()(const SubgraphAndCluster& v) const {
return hash<string>()(
- strings::StrCat(v.subgraph, v.outside_compilation_cluster));
+ absl::StrCat(v.subgraph, v.outside_compilation_cluster));
}
};
diff --git a/tensorflow/compiler/jit/encapsulate_subgraphs_pass_test.cc b/tensorflow/compiler/jit/encapsulate_subgraphs_pass_test.cc
index 7bc0ef0303..49958093b8 100644
--- a/tensorflow/compiler/jit/encapsulate_subgraphs_pass_test.cc
+++ b/tensorflow/compiler/jit/encapsulate_subgraphs_pass_test.cc
@@ -16,6 +16,7 @@ limitations under the License.
#include <memory>
#include <utility>
+#include "absl/strings/str_cat.h"
#include "tensorflow/compiler/jit/encapsulate_subgraphs_pass.h"
#include "absl/strings/match.h"
@@ -48,7 +49,7 @@ Status AddGraphDefToFunctionLibrary(const GraphDefBuilder& graphdef_builder,
FunctionDef* fdef = library->add_function();
TF_RETURN_IF_ERROR(GraphToFunctionDef(
*graph,
- strings::StrCat("_outside_compilation_shape_inference_", name_suffix),
+ absl::StrCat("_outside_compilation_shape_inference_", name_suffix),
fdef));
return Status::OK();
}
@@ -65,18 +66,18 @@ bool EqualProtoMap(const ::tensorflow::protobuf::Map<Tkey, Tvalue>& a,
const auto iter = b.find(elt_a.first);
if (iter == b.end()) {
if (diff) {
- *diff = strings::StrCat(
- map_name, " expected: contains element with key '",
- key_to_string(elt_a.first), "' got: map has no such element");
+ *diff = absl::StrCat(map_name, " expected: contains element with key '",
+ key_to_string(elt_a.first),
+ "' got: map has no such element");
}
return false;
}
if (!compare(elt_a.first, elt_a.second, iter->second)) {
if (diff) {
- *diff = strings::StrCat(map_name, " expected: element with key '",
- key_to_string(elt_a.first), "' has value '",
- value_to_string(elt_a.second), "' got: '",
- value_to_string(iter->second), "'");
+ *diff = absl::StrCat(map_name, " expected: element with key '",
+ key_to_string(elt_a.first), "' has value '",
+ value_to_string(elt_a.second), "' got: '",
+ value_to_string(iter->second), "'");
}
return false;
}
@@ -85,9 +86,9 @@ bool EqualProtoMap(const ::tensorflow::protobuf::Map<Tkey, Tvalue>& a,
const auto iter = a.find(elt_b.first);
if (iter == a.end()) {
if (diff) {
- *diff = strings::StrCat(map_name, " got: contains element with key '",
- key_to_string(elt_b.first),
- "' expected: map has no such element");
+ *diff = absl::StrCat(map_name, " got: contains element with key '",
+ key_to_string(elt_b.first),
+ "' expected: map has no such element");
}
return false;
}
@@ -99,25 +100,25 @@ bool EqualFunctionNodeDef(const NodeDef& a, const NodeDef& b,
const string& diff_preamble, string* diff) {
if (a.op() != b.op()) {
if (diff) {
- *diff = strings::StrCat(diff_preamble, " mismatch for node ", a.name(),
- ", expected op '", a.op(), "' got '", b.op());
+ *diff = absl::StrCat(diff_preamble, " mismatch for node ", a.name(),
+ ", expected op '", a.op(), "' got '", b.op());
}
return false;
}
if (a.device() != b.device()) {
if (diff) {
- *diff = strings::StrCat(diff_preamble, " mismatch for node ", a.name(),
- ", expected device '", a.device(), "' got '",
- b.device());
+ *diff = absl::StrCat(diff_preamble, " mismatch for node ", a.name(),
+ ", expected device '", a.device(), "' got '",
+ b.device());
}
return false;
}
if (a.input_size() != b.input_size()) {
if (diff) {
- *diff = strings::StrCat(diff_preamble, " mismatch for node ", a.name(),
- ", expected ", a.input_size(), " inputs got ",
- b.input_size(), " expected:\n", a.DebugString(),
- "\ngot:\n", b.DebugString());
+ *diff = absl::StrCat(diff_preamble, " mismatch for node ", a.name(),
+ ", expected ", a.input_size(), " inputs got ",
+ b.input_size(), " expected:\n", a.DebugString(),
+ "\ngot:\n", b.DebugString());
}
return false;
}
@@ -127,10 +128,10 @@ bool EqualFunctionNodeDef(const NodeDef& a, const NodeDef& b,
if (absl::StartsWith(a.input(i), "^")) {
if (!absl::StartsWith(b.input(i), "^")) {
if (diff) {
- *diff = strings::StrCat(
- diff_preamble, " mismatch for node ", a.name(), " input ", i,
- ", expected control input ", a.input(i), " got ", b.input(i),
- " expected:\n", a.DebugString(), "\ngot:\n", b.DebugString());
+ *diff = absl::StrCat(diff_preamble, " mismatch for node ", a.name(),
+ " input ", i, ", expected control input ",
+ a.input(i), " got ", b.input(i), " expected:\n",
+ a.DebugString(), "\ngot:\n", b.DebugString());
}
return false;
}
@@ -138,19 +139,19 @@ bool EqualFunctionNodeDef(const NodeDef& a, const NodeDef& b,
control_input_b.insert(b.input(i));
} else if (a.input(i) != b.input(i)) {
if (diff) {
- *diff = strings::StrCat(diff_preamble, " mismatch for node ", a.name(),
- " input ", i, ", expected ", a.input(i),
- " got ", b.input(i), " expected:\n",
- a.DebugString(), "\ngot:\n", b.DebugString());
+ *diff = absl::StrCat(diff_preamble, " mismatch for node ", a.name(),
+ " input ", i, ", expected ", a.input(i), " got ",
+ b.input(i), " expected:\n", a.DebugString(),
+ "\ngot:\n", b.DebugString());
}
return false;
}
}
if (control_input_a != control_input_b) {
if (diff) {
- *diff = strings::StrCat(diff_preamble, " mismatch for node ", a.name(),
- " control inputs differ expected:\n",
- a.DebugString(), "\ngot:\n", b.DebugString());
+ *diff = absl::StrCat(diff_preamble, " mismatch for node ", a.name(),
+ " control inputs differ expected:\n",
+ a.DebugString(), "\ngot:\n", b.DebugString());
}
return false;
}
@@ -170,18 +171,17 @@ bool EqualFunctionNodeDef(const NodeDef& a, const NodeDef& b,
return av.DebugString() == bv.DebugString();
}
},
- strings::StrCat(diff_preamble, " attr mismatch for node ", a.name()),
- diff);
+ absl::StrCat(diff_preamble, " attr mismatch for node ", a.name()), diff);
}
bool EqualFunctionDef(const FunctionDef& a, const FunctionDef& b,
string* diff) {
if (a.signature().DebugString() != b.signature().DebugString()) {
if (diff) {
- *diff = strings::StrCat("Signature mismatch for function ",
- a.signature().name(), ", expected:\n",
- a.signature().DebugString(), "\ngot:\n",
- b.signature().DebugString());
+ *diff =
+ absl::StrCat("Signature mismatch for function ", a.signature().name(),
+ ", expected:\n", a.signature().DebugString(), "\ngot:\n",
+ b.signature().DebugString());
}
return false;
}
@@ -191,7 +191,7 @@ bool EqualFunctionDef(const FunctionDef& a, const FunctionDef& b,
[](const string& key, const AttrValue& av, const AttrValue& bv) {
return av.DebugString() == bv.DebugString();
},
- strings::StrCat("attr mismatch for function ", a.signature().name()),
+ absl::StrCat("attr mismatch for function ", a.signature().name()),
diff)) {
return false;
}
@@ -201,7 +201,7 @@ bool EqualFunctionDef(const FunctionDef& a, const FunctionDef& b,
[](const string& key, const string& av, const string& bv) {
return av == bv;
},
- strings::StrCat("ret mismatch for function ", a.signature().name()),
+ absl::StrCat("ret mismatch for function ", a.signature().name()),
diff)) {
return false;
}
@@ -211,7 +211,7 @@ bool EqualFunctionDef(const FunctionDef& a, const FunctionDef& b,
if (a.node_def(i).name() == b.node_def(j).name()) {
if (!EqualFunctionNodeDef(
a.node_def(i), b.node_def(j),
- strings::StrCat("Function ", a.signature().name()), diff)) {
+ absl::StrCat("Function ", a.signature().name()), diff)) {
return false;
}
found = true;
@@ -220,9 +220,9 @@ bool EqualFunctionDef(const FunctionDef& a, const FunctionDef& b,
}
if (!found) {
if (diff) {
- *diff = strings::StrCat("Function ", a.signature().name(),
- ", expected: has node '", a.node_def(i).name(),
- "' got: no node of that name");
+ *diff = absl::StrCat("Function ", a.signature().name(),
+ ", expected: has node '", a.node_def(i).name(),
+ "' got: no node of that name");
}
return false;
}
@@ -237,9 +237,9 @@ bool EqualFunctionDef(const FunctionDef& a, const FunctionDef& b,
}
if (!found) {
if (diff) {
- *diff = strings::StrCat("Function ", a.signature().name(),
- ", got: has node '", b.node_def(i).name(),
- "' expected: no node of that name");
+ *diff = absl::StrCat("Function ", a.signature().name(),
+ ", got: has node '", b.node_def(i).name(),
+ "' expected: no node of that name");
}
return false;
}
@@ -258,8 +258,8 @@ bool EqualFunctionDefLibrary(const FunctionDefLibrary& expected,
auto it = actual_index.find(expected_function.signature().name());
if (it == actual_index.end()) {
if (diff) {
- *diff = strings::StrCat("Did not find expected function '",
- expected_function.signature().name(), "'");
+ *diff = absl::StrCat("Did not find expected function '",
+ expected_function.signature().name(), "'");
}
return false;
}
@@ -269,9 +269,9 @@ bool EqualFunctionDefLibrary(const FunctionDefLibrary& expected,
if (!actual_index.empty()) {
if (diff != nullptr) {
- *diff = strings::StrCat("Found unexpected function '",
- actual_index.begin()->second->signature().name(),
- "'");
+ *diff =
+ absl::StrCat("Found unexpected function '",
+ actual_index.begin()->second->signature().name(), "'");
}
return false;
}
@@ -420,10 +420,9 @@ Node* RecvAtHost(ops::NodeOut key_input, const string& cluster,
const string& oc_cluster, absl::Span<const DataType> dtypes,
const GraphDefBuilder::Options& opts) {
if (opts.HaveError()) return nullptr;
- string key =
- strings::StrCat("host_compute_channel_", cluster, "_", oc_cluster);
- string name = strings::StrCat("outside_compilation_", cluster, "_",
- oc_cluster, "_recv");
+ string key = absl::StrCat("host_compute_channel_", cluster, "_", oc_cluster);
+ string name =
+ absl::StrCat("outside_compilation_", cluster, "_", oc_cluster, "_recv");
NodeBuilder node_builder(opts.WithName(name).GetNameForOp("_XlaRecvAtHost"),
"_XlaRecvAtHost", opts.op_registry());
node_builder.Input(std::move(key_input));
@@ -440,10 +439,9 @@ Node* SendFromHost(ops::NodeOut key_input, const string& cluster,
const std::vector<ops::NodeOut>& inputs,
const GraphDefBuilder::Options& opts) {
if (opts.HaveError()) return nullptr;
- string key =
- strings::StrCat("host_compute_channel_", cluster, "_", oc_cluster);
- string name = strings::StrCat("outside_compilation_", cluster, "_",
- oc_cluster, "_send");
+ string key = absl::StrCat("host_compute_channel_", cluster, "_", oc_cluster);
+ string name =
+ absl::StrCat("outside_compilation_", cluster, "_", oc_cluster, "_send");
NodeBuilder node_builder(opts.WithName(name).GetNameForOp("_XlaSendFromHost"),
"_XlaSendFromHost", opts.op_registry());
node_builder.Input(inputs);
@@ -682,8 +680,8 @@ std::vector<std::pair<string, string>> GraphEdges(const Graph& graph) {
for (const Edge* edge : graph.edges()) {
if (edge->src()->IsSource() || edge->dst()->IsSink()) continue;
edges.emplace_back(
- strings::StrCat(edge->src()->name(), ":", edge->src_output()),
- strings::StrCat(edge->dst()->name(), ":", edge->dst_input()));
+ absl::StrCat(edge->src()->name(), ":", edge->src_output()),
+ absl::StrCat(edge->dst()->name(), ":", edge->dst_input()));
}
std::sort(edges.begin(), edges.end());
return edges;
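// A minimal sketch of the StrCat migration shown in the hunks above, assuming
// only <string> and absl/strings/str_cat.h; EdgeLabel is a hypothetical helper
// used purely to illustrate the replacement pattern, not code from this patch.
#include <string>
#include "absl/strings/str_cat.h"

std::string EdgeLabel(const std::string& src, int src_output,
                      const std::string& dst, int dst_input) {
  // absl::StrCat, like strings::StrCat, concatenates strings and integers
  // directly into a single std::string.
  return absl::StrCat(src, ":", src_output, " -> ", dst, ":", dst_input);
}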
diff --git a/tensorflow/compiler/jit/graphcycles/BUILD b/tensorflow/compiler/jit/graphcycles/BUILD
index 676f71a75a..8212956adf 100644
--- a/tensorflow/compiler/jit/graphcycles/BUILD
+++ b/tensorflow/compiler/jit/graphcycles/BUILD
@@ -14,6 +14,7 @@ cc_library(
hdrs = ["graphcycles.h"],
deps = [
"//tensorflow/core:lib",
+ "@com_google_absl//absl/container:inlined_vector",
],
)
diff --git a/tensorflow/compiler/jit/graphcycles/graphcycles.cc b/tensorflow/compiler/jit/graphcycles/graphcycles.cc
index 805bbc62c1..756377bd95 100644
--- a/tensorflow/compiler/jit/graphcycles/graphcycles.cc
+++ b/tensorflow/compiler/jit/graphcycles/graphcycles.cc
@@ -34,7 +34,7 @@ limitations under the License.
#include <algorithm>
#include <unordered_set>
-#include "tensorflow/core/lib/gtl/inlined_vector.h"
+#include "absl/container/inlined_vector.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
@@ -44,7 +44,7 @@ namespace {
typedef std::unordered_set<int32> NodeSet;
template <typename T>
struct VecStruct {
- typedef gtl::InlinedVector<T, 4> type;
+ typedef absl::InlinedVector<T, 4> type;
};
template <typename T>
using Vec = typename VecStruct<T>::type;
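// A minimal sketch of the InlinedVector change above, assuming
// absl/container/inlined_vector.h; the function is hypothetical and only shows
// that absl::InlinedVector<T, N> is a drop-in replacement for
// gtl::InlinedVector<T, N>.
#include "absl/container/inlined_vector.h"

int SumSmall() {
  // Up to 4 elements are stored inline, avoiding a heap allocation.
  absl::InlinedVector<int, 4> v = {1, 2, 3};
  v.push_back(4);
  int sum = 0;
  for (int x : v) sum += x;
  return sum;
}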
diff --git a/tensorflow/compiler/jit/mark_for_compilation_pass.cc b/tensorflow/compiler/jit/mark_for_compilation_pass.cc
index 4e4abade32..44caf0be52 100644
--- a/tensorflow/compiler/jit/mark_for_compilation_pass.cc
+++ b/tensorflow/compiler/jit/mark_for_compilation_pass.cc
@@ -43,7 +43,6 @@ limitations under the License.
#include "tensorflow/core/kernels/bounds_check.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/gtl/flatset.h"
-#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/public/version.h"
@@ -617,7 +616,7 @@ Status MarkForCompilationPass::Run(
}
static string RatioToString(int numerator, int denominator) {
- return strings::Printf("%d / %d (%.2f%%)", numerator, denominator,
+ return absl::StrFormat("%d / %d (%.2f%%)", numerator, denominator,
(100.0 * numerator) / denominator);
}
@@ -626,14 +625,14 @@ static void VLogClusteringSummary(const Graph& g) {
return;
}
- std::map<StringPiece, int> cluster_name_to_size;
- std::map<StringPiece, std::map<StringPiece, int>>
+ std::map<absl::string_view, int> cluster_name_to_size;
+ std::map<absl::string_view, std::map<absl::string_view, int>>
cluster_name_to_op_histogram;
- std::map<StringPiece, int> unclustered_op_histogram;
+ std::map<absl::string_view, int> unclustered_op_histogram;
int clustered_node_count = 0;
for (Node* n : g.nodes()) {
- absl::optional<StringPiece> cluster_name = GetXlaClusterForNode(*n);
+ absl::optional<absl::string_view> cluster_name = GetXlaClusterForNode(*n);
if (cluster_name) {
clustered_node_count++;
cluster_name_to_size[*cluster_name]++;
@@ -650,7 +649,7 @@ static void VLogClusteringSummary(const Graph& g) {
<< RatioToString(clustered_node_count, g.num_nodes());
for (const auto& cluster_name_size_pair : cluster_name_to_size) {
- StringPiece cluster_name = cluster_name_size_pair.first;
+ absl::string_view cluster_name = cluster_name_size_pair.first;
int size = cluster_name_size_pair.second;
VLOG(2) << " " << cluster_name << " "
<< RatioToString(size, g.num_nodes());
@@ -670,14 +669,15 @@ static void VLogClusteringSummary(const Graph& g) {
}
struct EdgeInfo {
- StringPiece node_name;
- absl::optional<StringPiece> cluster_name;
+ absl::string_view node_name;
+ absl::optional<absl::string_view> cluster_name;
- StringPiece GetClusterName() const {
+ absl::string_view GetClusterName() const {
return cluster_name ? *cluster_name : "[none]";
}
- std::pair<StringPiece, absl::optional<StringPiece>> AsPair() const {
+ std::pair<absl::string_view, absl::optional<absl::string_view>> AsPair()
+ const {
return {node_name, cluster_name};
}
@@ -686,19 +686,21 @@ static void VLogClusteringSummary(const Graph& g) {
}
};
- using EdgeInfoMap = std::map<StringPiece, std::map<EdgeInfo, int64>>;
+ using EdgeInfoMap = std::map<absl::string_view, std::map<EdgeInfo, int64>>;
EdgeInfoMap incoming_edge_infos;
EdgeInfoMap outgoing_edge_infos;
- std::set<StringPiece> cluster_names_to_print;
+ std::set<absl::string_view> cluster_names_to_print;
for (const Edge* e : g.edges()) {
const Node* from = e->src();
- absl::optional<StringPiece> from_cluster_name = GetXlaClusterForNode(*from);
+ absl::optional<absl::string_view> from_cluster_name =
+ GetXlaClusterForNode(*from);
const Node* to = e->dst();
- absl::optional<StringPiece> to_cluster_name = GetXlaClusterForNode(*to);
+ absl::optional<absl::string_view> to_cluster_name =
+ GetXlaClusterForNode(*to);
if (to_cluster_name == from_cluster_name) {
continue;
@@ -721,9 +723,9 @@ static void VLogClusteringSummary(const Graph& g) {
VLOG(2) << " [none]";
}
- auto print_edge_info_set_for_cluster = [&](StringPiece cluster_name,
+ auto print_edge_info_set_for_cluster = [&](absl::string_view cluster_name,
const EdgeInfoMap& edge_info_map,
- StringPiece desc) {
+ absl::string_view desc) {
auto it = edge_info_map.find(cluster_name);
if (it != edge_info_map.end()) {
VLOG(2) << " " << it->second.size() << " " << desc << " edges";
@@ -737,7 +739,7 @@ static void VLogClusteringSummary(const Graph& g) {
}
};
- for (StringPiece cluster_name : cluster_names_to_print) {
+ for (absl::string_view cluster_name : cluster_names_to_print) {
VLOG(2) << " ** Cluster " << cluster_name;
print_edge_info_set_for_cluster(cluster_name, incoming_edge_infos,
"incoming");
@@ -966,7 +968,7 @@ Status MarkForCompilationPass::RunImpl(
string& name = cluster_names[cluster];
if (name.empty()) {
- name = strings::StrCat("cluster_", cluster_sequence_num++);
+ name = absl::StrCat("cluster_", cluster_sequence_num++);
}
n->AddAttr(kXlaClusterAttr, name);
VLOG(3) << "Assigning node " << n->name() << " to cluster " << name;
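// A minimal sketch of the StrFormat and string_view changes above, assuming
// <string>, absl/strings/str_format.h, and absl/strings/string_view.h;
// RatioLabel is a hypothetical helper illustrating the pattern, not code from
// this patch.
#include <string>
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"

std::string RatioLabel(absl::string_view name, int numerator, int denominator) {
  // absl::StrFormat checks the format string against the argument types at
  // compile time, unlike strings::Printf.
  return absl::StrFormat("%s: %d / %d (%.2f%%)", name, numerator, denominator,
                         (100.0 * numerator) / denominator);
}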
diff --git a/tensorflow/compiler/jit/mark_for_compilation_pass_test.cc b/tensorflow/compiler/jit/mark_for_compilation_pass_test.cc
index 807ab51fd3..9473ac0a4c 100644
--- a/tensorflow/compiler/jit/mark_for_compilation_pass_test.cc
+++ b/tensorflow/compiler/jit/mark_for_compilation_pass_test.cc
@@ -633,7 +633,7 @@ TEST(XlaCompilationTest, IllegalCycle_UsefulErrorMessage) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope root = Scope::NewRootScope().ExitOnError();
{
- auto BuildNoopNode = [](StringPiece name, Graph* graph) {
+ auto BuildNoopNode = [](absl::string_view name, Graph* graph) {
NodeDefBuilder builder(name, "NoOp");
NodeDef def;
TF_CHECK_OK(builder.Finalize(&def));
diff --git a/tensorflow/compiler/jit/partially_decluster_pass.cc b/tensorflow/compiler/jit/partially_decluster_pass.cc
index a8f09bfa50..584c963f71 100644
--- a/tensorflow/compiler/jit/partially_decluster_pass.cc
+++ b/tensorflow/compiler/jit/partially_decluster_pass.cc
@@ -14,6 +14,7 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/jit/partially_decluster_pass.h"
+#include "absl/strings/str_cat.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "tensorflow/core/framework/memory_types.h"
#include "tensorflow/core/framework/node_def.pb.h"
@@ -30,7 +31,7 @@ Status FindNodesToDecluster(const Graph& graph, gtl::FlatSet<Node*>* result,
MemoryTypeVector input_mtypes, output_mtypes;
for (Node* n : post_order) {
- absl::optional<StringPiece> from_cluster = GetXlaClusterForNode(*n);
+ absl::optional<absl::string_view> from_cluster = GetXlaClusterForNode(*n);
if (!from_cluster) {
continue;
}
@@ -79,7 +80,7 @@ Status FindNodesToDecluster(const Graph& graph, gtl::FlatSet<Node*>* result,
// Check if `dst` is in a different cluster, unclustered, or about to be
// partially declustered (here we rely on the post-order traversal order).
// If yes, decluster `n` to avoid the device-to-host memcpy.
- absl::optional<StringPiece> dst_cluster =
+ absl::optional<absl::string_view> dst_cluster =
result->count(dst) ? absl::nullopt : GetXlaClusterForNode(*dst);
if (from_cluster != dst_cluster) {
CHECK(result->insert(n).second);
@@ -91,15 +92,16 @@ Status FindNodesToDecluster(const Graph& graph, gtl::FlatSet<Node*>* result,
}
Status PartiallyDeclusterNode(Graph* graph, Node* n) {
- StringPiece cluster_name = *GetXlaClusterForNode(*n);
- gtl::InlinedVector<const Edge*, 6> out_edges_to_clone;
+ absl::string_view cluster_name = *GetXlaClusterForNode(*n);
+ absl::InlinedVector<const Edge*, 6> out_edges_to_clone;
for (const Edge* out_edge : n->out_edges()) {
if (out_edge->IsControlEdge()) {
continue;
}
Node* dst = out_edge->dst();
- absl::optional<StringPiece> dst_cluster_name = GetXlaClusterForNode(*dst);
+ absl::optional<absl::string_view> dst_cluster_name =
+ GetXlaClusterForNode(*dst);
if (dst_cluster_name != cluster_name) {
out_edges_to_clone.push_back(out_edge);
}
@@ -108,7 +110,7 @@ Status PartiallyDeclusterNode(Graph* graph, Node* n) {
CHECK(!out_edges_to_clone.empty()) << n->DebugString();
NodeDef ndef = n->def();
- ndef.set_name(strings::StrCat(n->name(), "/declustered"));
+ ndef.set_name(absl::StrCat(n->name(), "/declustered"));
RemoveFromXlaCluster(&ndef);
Status s;
Node* cloned_node = graph->AddNode(ndef, &s);
diff --git a/tensorflow/compiler/jit/resource_operation_safety_analysis.cc b/tensorflow/compiler/jit/resource_operation_safety_analysis.cc
index 1ba4a5ef73..56e35c0059 100644
--- a/tensorflow/compiler/jit/resource_operation_safety_analysis.cc
+++ b/tensorflow/compiler/jit/resource_operation_safety_analysis.cc
@@ -165,7 +165,7 @@ bool IsEdgeSafe(XlaResourceOpKind from, XlaResourceOpKind to) {
using ResourceOp = std::pair<int, XlaResourceOpKind>;
string ResourceOpToString(const ResourceOp& resource_op) {
- return strings::StrCat(
+ return absl::StrCat(
resource_op.first, ": ",
XlaResourceOpInfo::XlaResourceOpKindToString(resource_op.second));
}
@@ -257,11 +257,11 @@ string ResourceOpSetToString(const ResourceOpSet& resource_op_set) {
std::vector<string> elements_debug_string;
std::transform(resource_op_set.begin(), resource_op_set.end(),
std::back_inserter(elements_debug_string), ResourceOpToString);
- return strings::StrCat("{", absl::StrJoin(elements_debug_string, ","), "}");
+ return absl::StrCat("{", absl::StrJoin(elements_debug_string, ","), "}");
}
string NodeToString(const Node& n, XlaResourceOpKind resource_op_kind) {
- return strings::StrCat(
+ return absl::StrCat(
"[", n.name(), ": ", n.type_string(), "(",
XlaResourceOpInfo::XlaResourceOpKindToString(resource_op_kind), ")", "]");
}
diff --git a/tensorflow/compiler/jit/xla_cluster_util.cc b/tensorflow/compiler/jit/xla_cluster_util.cc
index 4f2fabd658..03380e9406 100644
--- a/tensorflow/compiler/jit/xla_cluster_util.cc
+++ b/tensorflow/compiler/jit/xla_cluster_util.cc
@@ -17,6 +17,7 @@ limitations under the License.
#include <unordered_map>
+#include "absl/strings/str_cat.h"
#include "tensorflow/compiler/jit/resource_operation_safety_analysis.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/graph/control_flow.h"
@@ -52,8 +53,8 @@ string DescribeCycle(const GraphCycles* cycles, const Graph& graph, int src,
};
string description;
- strings::StrAppend(&description, "Edge from ", node_name(src), " to ",
- node_name(dst), " would create a cycle.\n");
+ absl::StrAppend(&description, "Edge from ", node_name(src), " to ",
+ node_name(dst), " would create a cycle.\n");
path.resize(path_size);
for (int32 node_id : path) {
string ascii_art;
@@ -64,7 +65,7 @@ string DescribeCycle(const GraphCycles* cycles, const Graph& graph, int src,
} else {
ascii_art = "+-- ";
}
- strings::StrAppend(&description, ascii_art, node_name(node_id), "\n");
+ absl::StrAppend(&description, ascii_art, node_name(node_id), "\n");
}
return description;
}
@@ -186,7 +187,7 @@ Status CreateCycleDetectionGraph(const Graph* graph, GraphCycles* cycles) {
return Status::OK();
}
-absl::optional<StringPiece> GetXlaClusterForNode(const Node& node) {
+absl::optional<absl::string_view> GetXlaClusterForNode(const Node& node) {
const AttrValue* attr_value = node.attrs().Find(kXlaClusterAttr);
if (attr_value == nullptr) {
return absl::nullopt;
diff --git a/tensorflow/compiler/jit/xla_cluster_util.h b/tensorflow/compiler/jit/xla_cluster_util.h
index b0439a63ca..17ae510a0e 100644
--- a/tensorflow/compiler/jit/xla_cluster_util.h
+++ b/tensorflow/compiler/jit/xla_cluster_util.h
@@ -47,7 +47,7 @@ Status CreateCycleDetectionGraph(const Graph* graph, GraphCycles* cycles);
// Returns the XLA cluster in which `node` is placed if it is in an XLA cluster,
// otherwise returns nullopt.
-absl::optional<StringPiece> GetXlaClusterForNode(const Node& node);
+absl::optional<absl::string_view> GetXlaClusterForNode(const Node& node);
// Removes `node_def` from its XLA cluster (by clearing its _XlaCluster attribute).
void RemoveFromXlaCluster(NodeDef* node_def);
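// A minimal sketch of the absl::optional<absl::string_view> return pattern
// adopted in GetXlaClusterForNode above, assuming absl/types/optional.h and
// absl/strings/string_view.h; FirstNonEmpty is a hypothetical example.
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"

absl::optional<absl::string_view> FirstNonEmpty(absl::string_view a,
                                                absl::string_view b) {
  if (!a.empty()) return a;
  if (!b.empty()) return b;
  // absl::nullopt signals absence without reserving a sentinel value.
  return absl::nullopt;
}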
diff --git a/tensorflow/compiler/jit/xla_compilation_cache.cc b/tensorflow/compiler/jit/xla_compilation_cache.cc
index ef6b0e67d3..3aa9e9c7ed 100644
--- a/tensorflow/compiler/jit/xla_compilation_cache.cc
+++ b/tensorflow/compiler/jit/xla_compilation_cache.cc
@@ -67,12 +67,12 @@ string XlaCompilationCache::DebugString() {
string XlaCompilationCache::SignatureDebugString(const Signature& sig) {
string result = sig.name;
for (const auto& a : sig.arg_types) {
- strings::StrAppend(&result, ",", DataTypeString(a.first),
- a.second.DebugString());
+ absl::StrAppend(&result, ",", DataTypeString(a.first),
+ a.second.DebugString());
}
for (const auto& v : sig.arg_values) {
- strings::StrAppend(&result, "; ", v.DebugString());
+ absl::StrAppend(&result, "; ", v.DebugString());
}
return result;
}
@@ -259,7 +259,7 @@ Status XlaCompilationCache::CompileImpl(
const XlaCompiler::CompileOptions& compile_options,
bool compile_single_op) {
CHECK_NE(executable, nullptr);
- VLOG(1) << "XlaCompilationCache::Compile " << DebugString();
+ VLOG(2) << "XlaCompilationCache::Compile " << DebugString();
if (VLOG_IS_ON(2)) {
VLOG(2) << "num_inputs=" << ctx->num_inputs()
@@ -310,7 +310,7 @@ Status XlaCompilationCache::CompileImpl(
// cache eviction.
mutex_lock entry_lock(entry->mu);
if (!entry->compiled) {
- VLOG(1) << "Compilation cache miss for signature: "
+ VLOG(2) << "Compilation cache miss for signature: "
<< SignatureDebugString(signature);
tensorflow::Env* env = tensorflow::Env::Default();
const uint64 compile_start_us = env->NowMicros();
diff --git a/tensorflow/compiler/jit/xla_device.cc b/tensorflow/compiler/jit/xla_device.cc
index f31879a2bc..51797def04 100644
--- a/tensorflow/compiler/jit/xla_device.cc
+++ b/tensorflow/compiler/jit/xla_device.cc
@@ -148,10 +148,9 @@ Status DefaultPaddedShapeFn(const Tensor& tensor, xla::Shape* shape) {
}
const DeviceAttributes attrs = Device::BuildDeviceAttributes(
- strings::StrCat(name_prefix, "/device:", device_name, ":",
- device_ordinal),
+ absl::StrCat(name_prefix, "/device:", device_name, ":", device_ordinal),
DeviceType(device_name), Bytes(16ULL << 30), DeviceLocality(),
- strings::StrCat("device: ", device_name, " device"));
+ absl::StrCat("device: ", device_name, " device"));
device->reset(
new XlaDevice(options, attrs, device_ordinal, DeviceType(jit_device_name),
diff --git a/tensorflow/compiler/jit/xla_device_context.cc b/tensorflow/compiler/jit/xla_device_context.cc
index ee07c5c964..af83c792e5 100644
--- a/tensorflow/compiler/jit/xla_device_context.cc
+++ b/tensorflow/compiler/jit/xla_device_context.cc
@@ -203,7 +203,7 @@ void XlaTransferManager::CopyCPUTensorToDevice(const Tensor* cpu_tensor,
}
void XlaTransferManager::CopyDeviceTensorToCPU(const Tensor* device_tensor,
- StringPiece tensor_name,
+ absl::string_view tensor_name,
Device* device,
Tensor* cpu_tensor,
StatusCallback done) {
@@ -339,7 +339,7 @@ void XlaDeviceContext::CopyCPUTensorToDevice(const Tensor* cpu_tensor,
}
void XlaDeviceContext::CopyDeviceTensorToCPU(const Tensor* device_tensor,
- StringPiece tensor_name,
+ absl::string_view tensor_name,
Device* device, Tensor* cpu_tensor,
StatusCallback done) {
manager_.CopyDeviceTensorToCPU(device_tensor, tensor_name, device, cpu_tensor,
diff --git a/tensorflow/compiler/jit/xla_device_context.h b/tensorflow/compiler/jit/xla_device_context.h
index 2e7445340c..df82421294 100644
--- a/tensorflow/compiler/jit/xla_device_context.h
+++ b/tensorflow/compiler/jit/xla_device_context.h
@@ -57,7 +57,7 @@ class XlaTransferManager {
void CopyCPUTensorToDevice(const Tensor* cpu_tensor, Device* device,
Tensor* device_tensor, StatusCallback done) const;
void CopyDeviceTensorToCPU(const Tensor* device_tensor,
- StringPiece tensor_name, Device* device,
+ absl::string_view tensor_name, Device* device,
Tensor* cpu_tensor, StatusCallback done);
void CopyDeviceTensorToDevice(const Tensor& src_tensor, Tensor* dst_tensor,
@@ -111,7 +111,7 @@ class XlaDeviceContext : public DeviceContext {
Tensor* device_tensor,
StatusCallback done) const override;
void CopyDeviceTensorToCPU(const Tensor* device_tensor,
- StringPiece tensor_name, Device* device,
+ absl::string_view tensor_name, Device* device,
Tensor* cpu_tensor, StatusCallback done) override;
void CopyDeviceTensorToDevice(const Tensor& src_tensor, Tensor* dst_tensor,
const StatusCallback& done);
diff --git a/tensorflow/compiler/jit/xla_device_ops.h b/tensorflow/compiler/jit/xla_device_ops.h
index 13da5d2f94..49c8582682 100644
--- a/tensorflow/compiler/jit/xla_device_ops.h
+++ b/tensorflow/compiler/jit/xla_device_ops.h
@@ -198,33 +198,33 @@ class XlaAssignVariableOp : public AsyncOpKernel {
\
REGISTER_KERNEL_BUILDER( \
Name("GeneratorDataset").Device(DEVICE).HostMemory("handle"), \
- GeneratorDatasetOp); \
+ data::GeneratorDatasetOp); \
REGISTER_KERNEL_BUILDER(Name("PrefetchDataset") \
.Device(DEVICE) \
.HostMemory("buffer_size") \
.HostMemory("input_dataset") \
.HostMemory("handle"), \
- PrefetchDatasetOp); \
+ data::PrefetchDatasetOp); \
\
REGISTER_KERNEL_BUILDER(Name("IteratorV2").Device(DEVICE), \
- IteratorHandleOp); \
+ data::IteratorHandleOp); \
REGISTER_KERNEL_BUILDER( \
Name("MakeIterator").Device(DEVICE).HostMemory("dataset"), \
- MakeIteratorOp); \
+ data::MakeIteratorOp); \
REGISTER_KERNEL_BUILDER(Name("AnonymousIterator").Device(DEVICE), \
- AnonymousIteratorHandleOp); \
+ data::AnonymousIteratorHandleOp); \
REGISTER_KERNEL_BUILDER(Name("IteratorGetNext").Device(DEVICE), \
- IteratorGetNextOp); \
+ data::IteratorGetNextOp); \
REGISTER_KERNEL_BUILDER(Name("IteratorGetNextSync").Device(DEVICE), \
- IteratorGetNextSyncOp); \
+ data::IteratorGetNextSyncOp); \
REGISTER_KERNEL_BUILDER(Name("IteratorToStringHandle") \
.Device(DEVICE) \
.HostMemory("string_handle"), \
- IteratorToStringHandleOp); \
+ data::IteratorToStringHandleOp); \
REGISTER_KERNEL_BUILDER(Name("IteratorFromStringHandleV2") \
.Device(DEVICE) \
.HostMemory("string_handle"), \
- IteratorFromStringHandleOp); \
+ data::IteratorFromStringHandleOp); \
REGISTER_KERNEL_BUILDER(Name(FunctionLibraryDefinition::kArgOp) \
.Device(DEVICE) \
.HostMemory("output") \
diff --git a/tensorflow/compiler/jit/xla_fusion_optimizer.cc b/tensorflow/compiler/jit/xla_fusion_optimizer.cc
index 07cfab6151..bc0db558d8 100644
--- a/tensorflow/compiler/jit/xla_fusion_optimizer.cc
+++ b/tensorflow/compiler/jit/xla_fusion_optimizer.cc
@@ -20,6 +20,7 @@ limitations under the License.
#include <unordered_map>
#include <unordered_set>
+#include "absl/strings/str_cat.h"
#include "tensorflow/compiler/jit/deadness_analysis.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/compiler/jit/graphcycles/graphcycles.h"
@@ -326,7 +327,7 @@ Status XlaFusionOptimizer::Optimize(grappler::Cluster* cluster,
string& name = cluster_names[cluster];
if (name.empty()) {
- name = strings::StrCat("cluster_", cluster_sequence_num++);
+ name = absl::StrCat("cluster_", cluster_sequence_num++);
}
n->AddAttr(kXlaClusterAttr, name);
VLOG(3) << "Assigning node " << n->name() << " to cluster " << name;
diff --git a/tensorflow/compiler/jit/xla_tensor.h b/tensorflow/compiler/jit/xla_tensor.h
index 4c9bb2e27b..d95da63405 100644
--- a/tensorflow/compiler/jit/xla_tensor.h
+++ b/tensorflow/compiler/jit/xla_tensor.h
@@ -122,7 +122,7 @@ class XlaTensor {
std::shared_ptr<se::Event> definition_event_;
// A list of all streams for which the tensor's content is defined for any
// newly enqueued command.
- gtl::InlinedVector<se::Stream*, 2> streams_defined_on_ GUARDED_BY(mu_);
+ absl::InlinedVector<se::Stream*, 2> streams_defined_on_ GUARDED_BY(mu_);
mutex mu_;
};
diff --git a/tensorflow/compiler/tests/BUILD b/tensorflow/compiler/tests/BUILD
index 34defe1c7a..050d827a09 100644
--- a/tensorflow/compiler/tests/BUILD
+++ b/tensorflow/compiler/tests/BUILD
@@ -1103,6 +1103,7 @@ cc_library(
"//tensorflow/core:test",
"//tensorflow/core:testlib",
"//tensorflow/core/kernels:ops_util",
+ "@com_google_absl//absl/strings",
],
)
diff --git a/tensorflow/compiler/tests/randomized_tests.cc b/tensorflow/compiler/tests/randomized_tests.cc
index 0faf0fd8ed..bddda6f302 100644
--- a/tensorflow/compiler/tests/randomized_tests.cc
+++ b/tensorflow/compiler/tests/randomized_tests.cc
@@ -45,6 +45,8 @@ limitations under the License.
#include <random>
#include <unordered_map>
+#include "absl/strings/str_cat.h"
+#include "absl/strings/string_view.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/compiler/tf2xla/type_util.h"
#include "tensorflow/core/common_runtime/device.h"
@@ -61,7 +63,6 @@ limitations under the License.
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
-#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/gtl/flatset.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session.h"
@@ -81,7 +82,7 @@ string* tf_xla_test_device_ptr; // initial value set in main()
bool tf_xla_test_use_jit = true;
string LocalDeviceToFullDeviceName(const string& device) {
- return strings::StrCat("/job:localhost/replica:0/task:0/device:", device);
+ return absl::StrCat("/job:localhost/replica:0/task:0/device:", device);
}
constexpr std::array<DataType, 5> kAllXlaTypes = {
@@ -107,11 +108,12 @@ class OpTestBuilder {
// Sets an attribute.
template <class T>
- OpTestBuilder& Attr(StringPiece attr_name, T&& value);
+ OpTestBuilder& Attr(absl::string_view attr_name, T&& value);
// Overload needed to allow {...} expressions for value.
template <class T>
- OpTestBuilder& Attr(StringPiece attr_name, std::initializer_list<T> value);
+ OpTestBuilder& Attr(absl::string_view attr_name,
+ std::initializer_list<T> value);
// Adds nodes that execute the operator under test on 'device' to 'graphdef'.
// If 'use_jit' is true, marks the operator under test to be compiled by XLA.
@@ -185,13 +187,13 @@ OpTestBuilder& OpTestBuilder::RandomUniqueInput(DataType type,
}
template <class T>
-OpTestBuilder& OpTestBuilder::Attr(StringPiece attr_name, T&& value) {
+OpTestBuilder& OpTestBuilder::Attr(absl::string_view attr_name, T&& value) {
AddNodeAttr(attr_name, std::forward<T>(value), &node_def_);
return *this;
}
template <class T>
-OpTestBuilder& OpTestBuilder::Attr(StringPiece attr_name,
+OpTestBuilder& OpTestBuilder::Attr(absl::string_view attr_name,
std::initializer_list<T> value) {
Attr<std::initializer_list<T>>(attr_name, std::move(value));
return *this;
@@ -209,7 +211,7 @@ Status OpTestBuilder::BuildGraph(const string& name_prefix,
NodeDef* test_def = graphdef->add_node();
*test_def = node_def_;
- test_def->set_name(strings::StrCat(name_prefix, "_op_under_test"));
+ test_def->set_name(absl::StrCat(name_prefix, "_op_under_test"));
test_def->set_device(device);
AddDefaultsToNodeDef(*op_def, test_def);
if (use_jit) {
@@ -224,7 +226,7 @@ Status OpTestBuilder::BuildGraph(const string& name_prefix,
// Build feed and fetch nodes.
for (int i = 0; i < input_types.size(); ++i) {
NodeDef* def = graphdef->add_node();
- string name = strings::StrCat(name_prefix, "_input_", i);
+ string name = absl::StrCat(name_prefix, "_input_", i);
TF_RETURN_IF_ERROR(NodeDefBuilder(name, "Placeholder")
.Device(device)
.Attr("dtype", input_types[i])
@@ -235,7 +237,7 @@ Status OpTestBuilder::BuildGraph(const string& name_prefix,
for (int i = 0; i < output_types.size(); ++i) {
NodeDef* def = graphdef->add_node();
- string name = strings::StrCat(name_prefix, "_output_", i);
+ string name = absl::StrCat(name_prefix, "_output_", i);
TF_RETURN_IF_ERROR(NodeDefBuilder(name, "Identity")
.Device(device)
.Attr("T", output_types[i])
@@ -726,11 +728,11 @@ bool IsClose<complex64>(const complex64& x, const complex64& y, double atol,
template <typename T>
string Str(T x) {
- return strings::StrCat(x);
+ return absl::StrCat(x);
}
template <>
string Str<complex64>(complex64 x) {
- return strings::StrCat("(", x.real(), ", ", x.imag(), ")");
+ return absl::StrCat("(", x.real(), ", ", x.imag(), ")");
}
template <typename T>
@@ -740,11 +742,11 @@ Status TensorsAreCloseImpl(const Tensor& x, const Tensor& y, double atol,
auto Ty = y.flat<T>();
for (int i = 0; i < Tx.size(); ++i) {
if (!IsClose(Tx(i), Ty(i), atol, rtol)) {
- return errors::InvalidArgument(strings::StrCat(
- i, "-th tensor element isn't close: ", Str(Tx(i)), " vs. ",
- Str(Ty(i)), ". x = ", x.DebugString(), "y = ", y.DebugString(),
- "atol = ", atol, " rtol = ", rtol,
- " tol = ", atol + rtol * Abs(Tx(i))));
+ return errors::InvalidArgument(
+ absl::StrCat(i, "-th tensor element isn't close: ", Str(Tx(i)),
+ " vs. ", Str(Ty(i)), ". x = ", x.DebugString(),
+ "y = ", y.DebugString(), "atol = ", atol,
+ " rtol = ", rtol, " tol = ", atol + rtol * Abs(Tx(i))));
}
}
return Status::OK();
@@ -756,7 +758,7 @@ Status TensorsAreEqualImpl(const Tensor& x, const Tensor& y) {
auto Ty = y.flat<T>();
for (int i = 0; i < Tx.size(); ++i) {
if (Tx(i) != Ty(i)) {
- return errors::InvalidArgument(strings::StrCat(
+ return errors::InvalidArgument(absl::StrCat(
i, "-th tensor element isn't equal: ", Tx(i), " vs. ", Ty(i),
". x = ", x.DebugString(), "y = ", y.DebugString()));
}
@@ -771,14 +773,14 @@ Status TensorsAreEqualImpl(const Tensor& x, const Tensor& y) {
Status TensorsAreClose(const Tensor& a, const Tensor& b, double atol,
double rtol) {
if (a.dtype() != b.dtype()) {
- return errors::InvalidArgument(strings::StrCat(
+ return errors::InvalidArgument(absl::StrCat(
"Tensors have different types: ", DataTypeString(a.dtype()), " and ",
DataTypeString(b.dtype())));
}
if (!a.IsSameSize(b)) {
- return errors::InvalidArgument(strings::StrCat(
- "Tensors have different shapes: ", a.shape().DebugString(), " and ",
- b.shape().DebugString()));
+ return errors::InvalidArgument(
+ absl::StrCat("Tensors have different shapes: ", a.shape().DebugString(),
+ " and ", b.shape().DebugString()));
}
switch (a.dtype()) {
@@ -827,7 +829,7 @@ OpTest::TestResult OpTest::ExpectTfAndXlaOutputsAreClose(
}
string cpu_device =
- LocalDeviceToFullDeviceName(strings::StrCat(DEVICE_CPU, ":0"));
+ LocalDeviceToFullDeviceName(absl::StrCat(DEVICE_CPU, ":0"));
string test_device = LocalDeviceToFullDeviceName(*tf_xla_test_device_ptr);
DeviceNameUtils::ParsedName parsed_name;
@@ -842,7 +844,7 @@ OpTest::TestResult OpTest::ExpectTfAndXlaOutputsAreClose(
std::vector<string> expected_inputs, test_inputs;
std::vector<string> expected_fetches, test_fetches;
Status status = builder.BuildGraph(
- strings::StrCat("test", num_tests_, "_expected"), cpu_device,
+ absl::StrCat("test", num_tests_, "_expected"), cpu_device,
/* use_jit= */ false, &graph, /* test_node_def= */ nullptr,
&expected_inputs, &expected_fetches);
if (!status.ok()) {
@@ -851,7 +853,7 @@ OpTest::TestResult OpTest::ExpectTfAndXlaOutputsAreClose(
}
NodeDef* node_def;
- status = builder.BuildGraph(strings::StrCat("test", num_tests_, "_test"),
+ status = builder.BuildGraph(absl::StrCat("test", num_tests_, "_test"),
test_device, tf_xla_test_use_jit, &graph,
&node_def, &test_inputs, &test_fetches);
if (!status.ok()) {
diff --git a/tensorflow/compiler/tests/xla_ops_test.py b/tensorflow/compiler/tests/xla_ops_test.py
index b2f026df6c..3f928a1bea 100644
--- a/tensorflow/compiler/tests/xla_ops_test.py
+++ b/tensorflow/compiler/tests/xla_ops_test.py
@@ -97,9 +97,9 @@ class XlaOpsTest(xla_test.XLATestCase, parameterized.TestCase):
args=(np.array([0xFFFFFFFF, 16], dtype=np.uint32), np.uint32(4)),
expected=np.array([0xFFFFFFFF, 1], dtype=np.uint32))
- PRECISION_VALUES = (None, xla_data_pb2.PrecisionConfigProto.DEFAULT,
- xla_data_pb2.PrecisionConfigProto.HIGH,
- xla_data_pb2.PrecisionConfigProto.HIGHEST)
+ PRECISION_VALUES = (None, xla_data_pb2.PrecisionConfig.DEFAULT,
+ xla_data_pb2.PrecisionConfig.HIGH,
+ xla_data_pb2.PrecisionConfig.HIGHEST)
@parameterized.parameters(*PRECISION_VALUES)
def testConv(self, precision):
@@ -120,7 +120,7 @@ class XlaOpsTest(xla_test.XLATestCase, parameterized.TestCase):
dnums.output_spatial_dimensions.extend(range(2, 2 + num_spatial_dims))
precision_config = None
if precision:
- precision_config = xla_data_pb2.PrecisionConfigProto()
+ precision_config = xla_data_pb2.PrecisionConfig()
precision_config.operand_precision.extend([precision, precision])
return xla.conv(
lhs,
@@ -151,7 +151,7 @@ class XlaOpsTest(xla_test.XLATestCase, parameterized.TestCase):
dnums.rhs_batch_dimensions.append(0)
precision_config = None
if precision:
- precision_config = xla_data_pb2.PrecisionConfigProto()
+ precision_config = xla_data_pb2.PrecisionConfig()
precision_config.operand_precision.extend([precision, precision])
return xla.dot_general(
lhs,
diff --git a/tensorflow/compiler/tf2xla/BUILD b/tensorflow/compiler/tf2xla/BUILD
index 0797b2cb17..22be7f048f 100644
--- a/tensorflow/compiler/tf2xla/BUILD
+++ b/tensorflow/compiler/tf2xla/BUILD
@@ -291,6 +291,7 @@ cc_library(
"//tensorflow/core:graph",
"//tensorflow/core:lib",
"//tensorflow/core:protos_all_cc",
+ "@com_google_absl//absl/strings",
"@com_google_absl//absl/types:optional",
],
)
@@ -433,6 +434,7 @@ cc_library(
"//tensorflow/core:framework_internal",
"//tensorflow/core:lib",
"//tensorflow/core:protos_all_cc",
+ "@com_google_absl//absl/strings",
],
)
@@ -609,11 +611,10 @@ cc_library(
srcs = ["resource_operation_table.cc"],
hdrs = ["resource_operation_table.h"],
deps = [
- "//tensorflow/core:framework",
"//tensorflow/core:lib",
"//tensorflow/core:ops",
- "//tensorflow/core:protos_all_cc",
"@com_google_absl//absl/algorithm:container",
+ "@com_google_absl//absl/strings",
],
)
diff --git a/tensorflow/compiler/tf2xla/dump_graph.cc b/tensorflow/compiler/tf2xla/dump_graph.cc
index 24616c01c7..380c6a7e23 100644
--- a/tensorflow/compiler/tf2xla/dump_graph.cc
+++ b/tensorflow/compiler/tf2xla/dump_graph.cc
@@ -18,8 +18,8 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/dump_graph.h"
+#include "absl/strings/str_cat.h"
#include "tensorflow/compiler/tf2xla/dump_graph_flags.h"
-#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/mutex.h"
@@ -52,9 +52,9 @@ string MakeUniqueFilename(string name) {
string filename = name;
if (count > 0) {
- strings::StrAppend(&filename, "_", count);
+ absl::StrAppend(&filename, "_", count);
}
- strings::StrAppend(&filename, ".pbtxt");
+ absl::StrAppend(&filename, ".pbtxt");
return filename;
}
@@ -69,7 +69,7 @@ string WriteTextProtoToUniqueFile(
<< proto_type << ": " << status;
return "(unavailable)";
}
- string filepath = strings::StrCat(dirname, "/", MakeUniqueFilename(name));
+ string filepath = absl::StrCat(dirname, "/", MakeUniqueFilename(name));
status = WriteTextProto(Env::Default(), filepath, proto);
if (!status.ok()) {
LOG(WARNING) << "Failed to dump " << proto_type << " to file: " << filepath
diff --git a/tensorflow/compiler/tf2xla/functionalize_cond.cc b/tensorflow/compiler/tf2xla/functionalize_cond.cc
index e2affee51f..0911550f1f 100644
--- a/tensorflow/compiler/tf2xla/functionalize_cond.cc
+++ b/tensorflow/compiler/tf2xla/functionalize_cond.cc
@@ -42,7 +42,7 @@ namespace functionalize_cond {
// TODO(jpienaar): Move to OutputTensor.
string DebugString(const OutputTensor& tensor) {
- return strings::StrCat(tensor.node->name(), ":", tensor.index);
+ return absl::StrCat(tensor.node->name(), ":", tensor.index);
}
string Branch_Name(BranchType b) {
@@ -61,17 +61,17 @@ string Branch_Name(BranchType b) {
string DebugString(StateMap::CondId cond_state) {
if (cond_state == nullptr || cond_state->empty()) return "{}";
using value_type = StateMap::CondState::value_type;
- return strings::StrCat(
+ return absl::StrCat(
"{",
absl::StrJoin(*cond_state, ", ",
[](string* output, const value_type& pred_branch) {
const OutputTensor& pred = pred_branch.first;
const BranchType& branch = pred_branch.second;
if (branch == BranchType::kNeither)
- strings::StrAppend(output, "d");
+ absl::StrAppend(output, "d");
else
- strings::StrAppend(output, "s(", DebugString(pred), ",",
- Branch_Name(branch), ")");
+ absl::StrAppend(output, "s(", DebugString(pred), ",",
+ Branch_Name(branch), ")");
}),
"}");
}
@@ -159,8 +159,8 @@ struct CondArgNode {
: src(src), src_output(src_output) {}
string ToString() const {
- return strings::StrCat("src=", src->name(), ":", src_output,
- " switches=", NodesToString(switches));
+ return absl::StrCat("src=", src->name(), ":", src_output,
+ " switches=", NodesToString(switches));
}
Node* src;
@@ -171,11 +171,11 @@ struct CondArgNode {
using CondArgNodes = std::vector<CondArgNode>;
string DebugString(const CondArgNodes& nodes) {
- return strings::StrCat(
+ return absl::StrCat(
"[",
absl::StrJoin(nodes, ", ",
[](string* output, const CondArgNode& node) {
- strings::StrAppend(output, node.ToString());
+ absl::StrAppend(output, node.ToString());
}),
"]");
}
@@ -373,7 +373,7 @@ Status Conditional::BuildArgumentNodes() {
for (auto branch : {BranchType::kElseBranch, BranchType::kThenBranch}) {
int branch_index = static_cast<int>(branch);
TF_RETURN_IF_ERROR(
- NodeBuilder(strings::StrCat("_Arg", arg_count),
+ NodeBuilder(absl::StrCat("_Arg", arg_count),
FunctionLibraryDefinition::kArgOp)
.Attr("T", dtype)
.Attr("index", arg_count)
@@ -441,7 +441,7 @@ Status Conditional::AddSwitchNodeAlongEdge(const Edge* edge, BranchType branch,
Node* src = edge->src();
int src_output = edge->src_output();
TF_RETURN_IF_ERROR(
- NodeBuilder(graph->NewName(strings::StrCat(src->name(), "_added_switch")),
+ NodeBuilder(graph->NewName(absl::StrCat(src->name(), "_added_switch")),
"Switch")
.Input(src, src_output)
.Input(const_cast<Node*>(predicate_.node), predicate_.index)
@@ -650,8 +650,8 @@ Status Conditional::BuildIfNode(Graph* graph,
int64 id = ++sequence_num;
NameAttrList body_name;
- body_name.set_name(strings::StrCat("_functionalize_if_",
- branch_name[branch_index], "_", id));
+ body_name.set_name(
+ absl::StrCat("_functionalize_if_", branch_name[branch_index], "_", id));
VLOG(3) << "FunctionalizeControlFlow (" << branch_name[branch_index]
<< "): "
@@ -804,7 +804,7 @@ Status Conditional::BuildAndReplace(Graph* graph,
string Conditional::name() const {
CHECK(!merges_.empty());
- return strings::StrCat((*merges_.begin())->name(), "_if");
+ return absl::StrCat((*merges_.begin())->name(), "_if");
}
Status FunctionalizeCond::AddIdentityNode(const Node* replacee, Node* if_node,
@@ -1327,12 +1327,12 @@ void FunctionalizeCond::DumpGraphWithCondState(const string& name) {
for (Node* n : graph_->nodes()) {
n->ClearAttr(kCondGroupDebugAttr);
n->AddAttr(kCondGroupDebugAttr,
- strings::StrCat(state_map_.CondStateToString(n), "_",
- state_map_.AncestorStateToString(n)));
+ absl::StrCat(state_map_.CondStateToString(n), "_",
+ state_map_.AncestorStateToString(n)));
}
LOG(INFO) << "FunctionalizeControlFlow (" << name << "): "
- << dump_graph::DumpGraphToFile(
- strings::StrCat("functionalize_", name), *graph_, library_);
+ << dump_graph::DumpGraphToFile(absl::StrCat("functionalize_", name),
+ *graph_, library_);
}
Status FunctionalizeCond::Functionalize(Graph* graph,
diff --git a/tensorflow/compiler/tf2xla/functionalize_control_flow_util.cc b/tensorflow/compiler/tf2xla/functionalize_control_flow_util.cc
index 924fcdd9cd..54cebc6177 100644
--- a/tensorflow/compiler/tf2xla/functionalize_control_flow_util.cc
+++ b/tensorflow/compiler/tf2xla/functionalize_control_flow_util.cc
@@ -42,7 +42,7 @@ xla::StatusOr<Node*> BuildRetvalNode(Graph* graph, DataType type, int index) {
const char* const kRetValOp = "_Retval";
NodeDef ret_def;
ret_def.set_op(kRetValOp);
- ret_def.set_name(strings::StrCat(kRetValOp, index));
+ ret_def.set_name(absl::StrCat(kRetValOp, index));
AddNodeAttr("T", type, &ret_def);
AddNodeAttr("index", index, &ret_def);
return AddNodeDefToGraph(ret_def, graph);
diff --git a/tensorflow/compiler/tf2xla/functionalize_control_flow_util.h b/tensorflow/compiler/tf2xla/functionalize_control_flow_util.h
index 61940e3586..582b49d511 100644
--- a/tensorflow/compiler/tf2xla/functionalize_control_flow_util.h
+++ b/tensorflow/compiler/tf2xla/functionalize_control_flow_util.h
@@ -43,13 +43,12 @@ xla::StatusOr<Node*> BuildRetvalNode(Graph* graph, DataType type, int index);
// Returns a textual representation of the names of the nodes in the input.
template <typename T>
string NodesToString(const T& nodes) {
- return strings::StrCat("{",
- absl::StrJoin(nodes, ",",
- [](string* output, const Node* node) {
- strings::StrAppend(output,
- node->name());
- }),
- "}");
+ return absl::StrCat("{",
+ absl::StrJoin(nodes, ",",
+ [](string* output, const Node* node) {
+ absl::StrAppend(output, node->name());
+ }),
+ "}");
}
} // namespace tensorflow
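// A minimal sketch of the StrJoin-with-formatter pattern used by NodesToString
// above, assuming <string>, <vector>, absl/strings/str_cat.h, and
// absl/strings/str_join.h; the Named struct is a hypothetical stand-in for
// Node*.
#include <string>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"

struct Named {
  std::string name;
};

std::string NamesToString(const std::vector<Named>& items) {
  // The formatter lambda appends each element's textual form to the output.
  return absl::StrCat("{",
                      absl::StrJoin(items, ",",
                                    [](std::string* out, const Named& item) {
                                      absl::StrAppend(out, item.name);
                                    }),
                      "}");
}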
diff --git a/tensorflow/compiler/tf2xla/functionalize_while.cc b/tensorflow/compiler/tf2xla/functionalize_while.cc
index 6e3c4b0e0f..7f45e3bffa 100644
--- a/tensorflow/compiler/tf2xla/functionalize_while.cc
+++ b/tensorflow/compiler/tf2xla/functionalize_while.cc
@@ -132,7 +132,7 @@ Status CopySubgraph(const Graph& graph, const Frame* frame,
StatusOr<Node*> BuildArgNode(Graph* graph, DataType type, int index) {
const char* const kArgOp = "_Arg";
NodeDef arg_def;
- NodeDefBuilder builder(strings::StrCat(kArgOp, index), kArgOp);
+ NodeDefBuilder builder(absl::StrCat(kArgOp, index), kArgOp);
builder.Attr("T", type);
builder.Attr("index", index);
TF_RETURN_IF_ERROR(builder.Finalize(&arg_def));
@@ -487,9 +487,9 @@ Status FunctionalizeLoop(const FunctionLibraryDefinition* lookup_library,
static std::atomic<int64> sequence_num(0LL);
int64 id = ++sequence_num;
NameAttrList cond_name;
- cond_name.set_name(strings::StrCat("_functionalize_cond_", id));
+ cond_name.set_name(absl::StrCat("_functionalize_cond_", id));
NameAttrList body_name;
- body_name.set_name(strings::StrCat("_functionalize_body_", id));
+ body_name.set_name(absl::StrCat("_functionalize_body_", id));
FunctionDef cond_fdef;
TF_RETURN_IF_ERROR(
GraphToFunctionDef(*cond_graph, cond_name.name(), &cond_fdef));
diff --git a/tensorflow/compiler/tf2xla/graph_compiler.cc b/tensorflow/compiler/tf2xla/graph_compiler.cc
index 1ed1fb3b02..bc2e640559 100644
--- a/tensorflow/compiler/tf2xla/graph_compiler.cc
+++ b/tensorflow/compiler/tf2xla/graph_compiler.cc
@@ -127,7 +127,7 @@ Status GraphCompiler::Compile() {
TF_RET_CHECK(!n->IsRecv() && !n->IsSend() && !n->IsSwitch())
<< "Not supported node: " << n->DebugString();
params.op_kernel = op_kernel.get();
- gtl::InlinedVector<AllocatorAttributes, 4> output_attr(n->num_outputs());
+ absl::InlinedVector<AllocatorAttributes, 4> output_attr(n->num_outputs());
params.output_attr_array = output_attr.data();
// tensor_inputs_ is a buffer reused across graph traversal. We clean up and
diff --git a/tensorflow/compiler/tf2xla/graph_compiler.h b/tensorflow/compiler/tf2xla/graph_compiler.h
index 127562eb23..ab7cac7100 100644
--- a/tensorflow/compiler/tf2xla/graph_compiler.h
+++ b/tensorflow/compiler/tf2xla/graph_compiler.h
@@ -89,7 +89,7 @@ class GraphCompiler {
ScopedStepContainer* step_container_;
// A buffer to hold tensor inputs to a node; it is reused across the graph
// traversal.
- gtl::InlinedVector<TensorValue, 4> tensor_inputs_;
+ absl::InlinedVector<TensorValue, 4> tensor_inputs_;
};
} // namespace tensorflow
diff --git a/tensorflow/compiler/tf2xla/kernels/batchtospace_op.cc b/tensorflow/compiler/tf2xla/kernels/batchtospace_op.cc
index edced6bc0e..a18e04995b 100644
--- a/tensorflow/compiler/tf2xla/kernels/batchtospace_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/batchtospace_op.cc
@@ -26,7 +26,7 @@ void BatchToSpace(XlaOpKernelContext* ctx, const xla::XlaOp& input,
absl::Span<const int64> block_shape,
const xla::Literal& crops) {
const int input_rank = input_tensor_shape.dims();
- const gtl::InlinedVector<int64, 4> input_shape =
+ const absl::InlinedVector<int64, 4> input_shape =
input_tensor_shape.dim_sizes();
const int block_rank = block_shape.size();
diff --git a/tensorflow/compiler/tf2xla/kernels/bcast_ops.cc b/tensorflow/compiler/tf2xla/kernels/bcast_ops.cc
index 2e383b1473..182f7c9934 100644
--- a/tensorflow/compiler/tf2xla/kernels/bcast_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/bcast_ops.cc
@@ -39,7 +39,7 @@ class BCastArgsOp : public XlaOpKernel {
OP_REQUIRES(
ctx, ctx->num_inputs() == 2,
errors::Unimplemented("Broadcast for n-ary operations (n > 2)"));
- gtl::InlinedVector<BCast::Vec, 2> shapes;
+ absl::InlinedVector<BCast::Vec, 2> shapes;
for (int i = 0; i < ctx->num_inputs(); ++i) {
const TensorShape in_shape = ctx->InputShape(i);
OP_REQUIRES(ctx, TensorShapeUtils::IsVector(in_shape),
@@ -88,7 +88,7 @@ class BCastGradArgsOp : public XlaOpKernel {
ctx, ctx->num_inputs() == 2,
errors::Unimplemented("Broadcast for n-ary operations (n > 2)"));
- gtl::InlinedVector<BCast::Vec, 4> shapes;
+ absl::InlinedVector<BCast::Vec, 4> shapes;
for (int i = 0; i < ctx->num_inputs(); ++i) {
const TensorShape in_shape = ctx->InputShape(i);
OP_REQUIRES(ctx, TensorShapeUtils::IsVector(in_shape),
diff --git a/tensorflow/compiler/tf2xla/kernels/depthtospace_op.cc b/tensorflow/compiler/tf2xla/kernels/depthtospace_op.cc
index 12b0e38288..e96a1adce4 100644
--- a/tensorflow/compiler/tf2xla/kernels/depthtospace_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/depthtospace_op.cc
@@ -48,7 +48,7 @@ class DepthToSpaceOp : public XlaOpKernel {
OP_REQUIRES(ctx, kRequiredDims == input_rank,
errors::InvalidArgument("Input rank should be ", kRequiredDims,
"; got: ", input_rank));
- const gtl::InlinedVector<int64, 4> input_shape =
+ const absl::InlinedVector<int64, 4> input_shape =
input_tensor_shape.dim_sizes();
xla::XlaOp input = ctx->Input(0);
diff --git a/tensorflow/compiler/tf2xla/kernels/pooling_ops.cc b/tensorflow/compiler/tf2xla/kernels/pooling_ops.cc
index f6f158a73b..27690c156e 100644
--- a/tensorflow/compiler/tf2xla/kernels/pooling_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/pooling_ops.cc
@@ -138,7 +138,7 @@ xla::TensorFormat XlaTensorFormat(tensorflow::TensorFormat data_format,
int num_dims = num_spatial_dims + 2;
int batch_dimension = GetTensorBatchDimIndex(num_dims, data_format);
int feature_dimension = GetTensorFeatureDimIndex(num_dims, data_format);
- gtl::InlinedVector<int64, 4> spatial_dimensions(num_spatial_dims);
+ absl::InlinedVector<int64, 4> spatial_dimensions(num_spatial_dims);
for (int spatial_dim = 0; spatial_dim < num_spatial_dims; ++spatial_dim) {
spatial_dimensions[spatial_dim] =
GetTensorSpatialDimIndex(num_dims, data_format, spatial_dim);
diff --git a/tensorflow/compiler/tf2xla/kernels/reduction_ops_common.cc b/tensorflow/compiler/tf2xla/kernels/reduction_ops_common.cc
index 598248563b..118f2798d5 100644
--- a/tensorflow/compiler/tf2xla/kernels/reduction_ops_common.cc
+++ b/tensorflow/compiler/tf2xla/kernels/reduction_ops_common.cc
@@ -69,7 +69,7 @@ void XlaReductionOp::Compile(XlaOpKernelContext* ctx) {
VLOG(1) << "data shape: " << data_shape.DebugString();
VLOG(1) << "axes : " << absl::StrJoin(axes, ",");
- gtl::InlinedVector<bool, 4> bitmap(data_shape.dims(), false);
+ absl::InlinedVector<bool, 4> bitmap(data_shape.dims(), false);
std::vector<int64> xla_axes;
int64 num_elements_reduced = 1LL;
for (int64 i = 0; i < axes_tensor_shape.num_elements(); ++i) {
@@ -103,7 +103,7 @@ void XlaReductionOp::Compile(XlaOpKernelContext* ctx) {
xla::XlaBuilder* const b = ctx->builder();
// Construct the builder for the reduction lambda.
- xla::XlaBuilder r(strings::StrCat(desc, "-reduction"));
+ xla::XlaBuilder r(absl::StrCat(desc, "-reduction"));
xla::PrimitiveType type;
TF_CHECK_OK(DataTypeToPrimitiveType(reduction_type_, &type));
diff --git a/tensorflow/compiler/tf2xla/kernels/reverse_op.cc b/tensorflow/compiler/tf2xla/kernels/reverse_op.cc
index c0afccaa5b..8494864b33 100644
--- a/tensorflow/compiler/tf2xla/kernels/reverse_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/reverse_op.cc
@@ -97,7 +97,7 @@ class ReverseV2Op : public XlaOpKernel {
// witnessed_axes is used to ensure that the same axis is not marked to be
// reversed multiple times.
- gtl::InlinedVector<bool, 8> witnessed_axes(x_shape.dims(), false);
+ absl::InlinedVector<bool, 8> witnessed_axes(x_shape.dims(), false);
for (int d = 0; d < axes.size(); ++d) {
OP_REQUIRES(
diff --git a/tensorflow/compiler/tf2xla/kernels/shape_op.cc b/tensorflow/compiler/tf2xla/kernels/shape_op.cc
index 4e0cf99d8e..2e0a69b70e 100644
--- a/tensorflow/compiler/tf2xla/kernels/shape_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/shape_op.cc
@@ -115,7 +115,7 @@ class ExpandDimsOp : public XlaOpKernel {
// accept legacy scalars, even when they should be forbidden by the graphdef
// version.
OP_REQUIRES(ctx, dim_shape.num_elements() == 1,
- errors::InvalidArgument(strings::StrCat(
+ errors::InvalidArgument(absl::StrCat(
"dim input to ExpandDims must be a scalar; got ",
dim_shape.DebugString())));
diff --git a/tensorflow/compiler/tf2xla/kernels/spacetobatch_op.cc b/tensorflow/compiler/tf2xla/kernels/spacetobatch_op.cc
index b7b4f3a546..76b79be6f6 100644
--- a/tensorflow/compiler/tf2xla/kernels/spacetobatch_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/spacetobatch_op.cc
@@ -26,7 +26,7 @@ void SpaceToBatch(XlaOpKernelContext* ctx, const xla::XlaOp& input,
absl::Span<const int64> block_shape,
const xla::Literal& paddings) {
const int input_rank = input_tensor_shape.dims();
- const gtl::InlinedVector<int64, 4> input_shape =
+ const absl::InlinedVector<int64, 4> input_shape =
input_tensor_shape.dim_sizes();
const int block_rank = block_shape.size();
diff --git a/tensorflow/compiler/tf2xla/kernels/spacetodepth_op.cc b/tensorflow/compiler/tf2xla/kernels/spacetodepth_op.cc
index 4493539fe3..3293c13b21 100644
--- a/tensorflow/compiler/tf2xla/kernels/spacetodepth_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/spacetodepth_op.cc
@@ -48,7 +48,7 @@ class SpaceToDepthOp : public XlaOpKernel {
OP_REQUIRES(ctx, kRequiredDims == input_rank,
errors::InvalidArgument("Input rank should be ", kRequiredDims,
"; got ", input_rank));
- const gtl::InlinedVector<int64, 4> input_shape =
+ const absl::InlinedVector<int64, 4> input_shape =
input_tensor_shape.dim_sizes();
xla::XlaOp input = ctx->Input(0);
diff --git a/tensorflow/compiler/tf2xla/kernels/stack_ops.cc b/tensorflow/compiler/tf2xla/kernels/stack_ops.cc
index df91900570..ee70f508a9 100644
--- a/tensorflow/compiler/tf2xla/kernels/stack_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/stack_ops.cc
@@ -111,7 +111,7 @@ class StackOp : public XlaOpKernel {
xla::XlaOp value;
XlaContext& xc = XlaContext::Get(ctx);
XlaResource* resource;
- string name = strings::StrCat("Stack: ", stack_name_);
+ string name = absl::StrCat("Stack: ", stack_name_);
OP_REQUIRES_OK(
ctx, xc.CreateResource(XlaResource::kStack, -1, std::move(name), dtype_,
TensorShape(), value, /*tensor_array_size=*/size,
diff --git a/tensorflow/compiler/tf2xla/kernels/strided_slice_op.cc b/tensorflow/compiler/tf2xla/kernels/strided_slice_op.cc
index 472d4744d7..2b2e3de64f 100644
--- a/tensorflow/compiler/tf2xla/kernels/strided_slice_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/strided_slice_op.cc
@@ -46,9 +46,9 @@ class StridedSliceOp : public XlaOpKernel {
const TensorShape input_shape = ctx->InputShape(0);
TensorShape final_shape;
- gtl::InlinedVector<int64, 4> begin;
- gtl::InlinedVector<int64, 4> end;
- gtl::InlinedVector<int64, 4> strides;
+ absl::InlinedVector<int64, 4> begin;
+ absl::InlinedVector<int64, 4> end;
+ absl::InlinedVector<int64, 4> strides;
xla::Literal begin_literal, end_literal, strides_literal;
OP_REQUIRES_OK(ctx, ctx->ConstantInput(1, &begin_literal));
@@ -72,8 +72,8 @@ class StridedSliceOp : public XlaOpKernel {
shrink_axis_mask_, &dummy_processing_shape, &final_shape,
&dummy, &dummy, &dummy, &begin, &end, &strides));
- gtl::InlinedVector<int64, 4> dimensions_to_reverse;
- gtl::InlinedVector<int64, 4> slice_begin, slice_end, slice_strides;
+ absl::InlinedVector<int64, 4> dimensions_to_reverse;
+ absl::InlinedVector<int64, 4> slice_begin, slice_end, slice_strides;
for (int i = 0; i < begin.size(); ++i) {
if (strides[i] > 0) {
@@ -127,9 +127,9 @@ class StridedSliceGradOp : public XlaOpKernel {
void Compile(XlaOpKernelContext* ctx) override {
TensorShape processing_shape, final_shape;
- gtl::InlinedVector<int64, 4> begin;
- gtl::InlinedVector<int64, 4> end;
- gtl::InlinedVector<int64, 4> strides;
+ absl::InlinedVector<int64, 4> begin;
+ absl::InlinedVector<int64, 4> end;
+ absl::InlinedVector<int64, 4> strides;
TensorShape input_shape;
OP_REQUIRES_OK(ctx, ctx->ConstantInputAsShape(0, &input_shape));
@@ -175,7 +175,7 @@ class StridedSliceGradOp : public XlaOpKernel {
grad = xla::Reshape(grad, processing_shape.dim_sizes());
// Pad the input gradients.
- gtl::InlinedVector<int64, 4> dimensions_to_reverse;
+ absl::InlinedVector<int64, 4> dimensions_to_reverse;
xla::PaddingConfig padding_config;
for (int i = 0; i < processing_shape.dims(); ++i) {
@@ -238,9 +238,9 @@ class StridedSliceAssignOp : public XlaOpKernel {
void Compile(XlaOpKernelContext* ctx) override {
TensorShape final_shape;
- gtl::InlinedVector<int64, 4> begin;
- gtl::InlinedVector<int64, 4> end;
- gtl::InlinedVector<int64, 4> strides;
+ absl::InlinedVector<int64, 4> begin;
+ absl::InlinedVector<int64, 4> end;
+ absl::InlinedVector<int64, 4> strides;
xla::Literal begin_literal, end_literal, strides_literal;
OP_REQUIRES_OK(ctx, ctx->ConstantInput(1, &begin_literal));
@@ -287,8 +287,8 @@ class StridedSliceAssignOp : public XlaOpKernel {
xla::XlaOp rhs = ctx->Input(4);
- gtl::InlinedVector<int64, 4> dimensions_to_reverse;
- gtl::InlinedVector<int64, 4> slice_begin, slice_dims;
+ absl::InlinedVector<int64, 4> dimensions_to_reverse;
+ absl::InlinedVector<int64, 4> slice_begin, slice_dims;
for (int i = 0; i < begin.size(); ++i) {
// TODO(phawkins): implement strides != 1
OP_REQUIRES(
diff --git a/tensorflow/compiler/tf2xla/kernels/tensor_array_ops.cc b/tensorflow/compiler/tf2xla/kernels/tensor_array_ops.cc
index bb114d1aed..94108b764f 100644
--- a/tensorflow/compiler/tf2xla/kernels/tensor_array_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/tensor_array_ops.cc
@@ -167,7 +167,7 @@ class TensorArrayOp : public XlaOpKernel {
XlaContext& xc = XlaContext::Get(ctx);
XlaResource* var;
- string name = strings::StrCat("TensorArray: ", tensor_array_name_);
+ string name = absl::StrCat("TensorArray: ", tensor_array_name_);
OP_REQUIRES_OK(
ctx, xc.CreateResource(XlaResource::kTensorArray, -1, std::move(name),
dtype_, shape, value, /*tensor_array_size=*/size,
diff --git a/tensorflow/compiler/tf2xla/kernels/transpose_op.cc b/tensorflow/compiler/tf2xla/kernels/transpose_op.cc
index f9148b3942..6b303b31d4 100644
--- a/tensorflow/compiler/tf2xla/kernels/transpose_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/transpose_op.cc
@@ -61,7 +61,7 @@ class TransposeOp : public XlaOpKernel {
std::vector<int64> transposed_order;
// Check whether permutation is a permutation of integers of [0 .. dims).
- gtl::InlinedVector<bool, 8> bits(dims);
+ absl::InlinedVector<bool, 8> bits(dims);
bool is_identity = true;
for (int i = 0; i < dims; ++i) {
const int32 d = perm[i];
diff --git a/tensorflow/compiler/tf2xla/kernels/xla_conv_op.cc b/tensorflow/compiler/tf2xla/kernels/xla_conv_op.cc
index 8848623868..fecc7c556e 100644
--- a/tensorflow/compiler/tf2xla/kernels/xla_conv_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/xla_conv_op.cc
@@ -84,7 +84,7 @@ class XlaConvOp : public XlaOpKernel {
private:
xla::ConvolutionDimensionNumbers dnums_;
- xla::PrecisionConfigProto precision_config_;
+ xla::PrecisionConfig precision_config_;
TF_DISALLOW_COPY_AND_ASSIGN(XlaConvOp);
};
diff --git a/tensorflow/compiler/tf2xla/kernels/xla_dot_op.cc b/tensorflow/compiler/tf2xla/kernels/xla_dot_op.cc
index 2fed53e5c0..40b15b5579 100644
--- a/tensorflow/compiler/tf2xla/kernels/xla_dot_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/xla_dot_op.cc
@@ -54,7 +54,7 @@ class XlaDotOp : public XlaOpKernel {
private:
xla::DotDimensionNumbers dnums_;
- xla::PrecisionConfigProto precision_config_;
+ xla::PrecisionConfig precision_config_;
TF_DISALLOW_COPY_AND_ASSIGN(XlaDotOp);
};
diff --git a/tensorflow/compiler/tf2xla/lib/BUILD b/tensorflow/compiler/tf2xla/lib/BUILD
index 9365d203f0..8597e7f139 100644
--- a/tensorflow/compiler/tf2xla/lib/BUILD
+++ b/tensorflow/compiler/tf2xla/lib/BUILD
@@ -205,7 +205,7 @@ cc_library(
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla/client:xla_builder",
"//tensorflow/compiler/xla/client:xla_computation",
- "//tensorflow/core:lib",
+ "@com_google_absl//absl/strings",
"@com_google_absl//absl/types:span",
],
)
diff --git a/tensorflow/compiler/tf2xla/lib/batch_dot.cc b/tensorflow/compiler/tf2xla/lib/batch_dot.cc
index d8c050d09e..64f2d781a6 100644
--- a/tensorflow/compiler/tf2xla/lib/batch_dot.cc
+++ b/tensorflow/compiler/tf2xla/lib/batch_dot.cc
@@ -28,7 +28,7 @@ namespace tensorflow {
xla::XlaOp BatchDot(xla::XlaOp x, xla::XlaOp y, bool transpose_x,
bool transpose_y, bool conjugate_x, bool conjugate_y,
- xla::PrecisionConfigProto::Precision precision) {
+ xla::PrecisionConfig::Precision precision) {
xla::XlaBuilder* builder = x.builder();
return builder->ReportErrorOrReturn([&]() -> xla::StatusOr<xla::XlaOp> {
TF_ASSIGN_OR_RETURN(xla::Shape x_shape, builder->GetShape(x));
@@ -96,7 +96,7 @@ xla::XlaOp BatchDot(xla::XlaOp x, xla::XlaOp y, bool transpose_x,
y = xla::Conj(y);
}
- xla::PrecisionConfigProto precision_proto;
+ xla::PrecisionConfig precision_proto;
precision_proto.add_operand_precision(precision);
precision_proto.add_operand_precision(precision);
diff --git a/tensorflow/compiler/tf2xla/lib/batch_dot.h b/tensorflow/compiler/tf2xla/lib/batch_dot.h
index 6cfccd5553..6edd63a4d3 100644
--- a/tensorflow/compiler/tf2xla/lib/batch_dot.h
+++ b/tensorflow/compiler/tf2xla/lib/batch_dot.h
@@ -43,11 +43,11 @@ namespace tensorflow {
// It is computed as:
//
// output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
-xla::XlaOp BatchDot(xla::XlaOp x, xla::XlaOp y, bool transpose_x = false,
- bool transpose_y = false, bool conjugate_x = false,
- bool conjugate_y = false,
- xla::PrecisionConfigProto::Precision precision =
- xla::PrecisionConfigProto::DEFAULT);
+xla::XlaOp BatchDot(
+ xla::XlaOp x, xla::XlaOp y, bool transpose_x = false,
+ bool transpose_y = false, bool conjugate_x = false,
+ bool conjugate_y = false,
+ xla::PrecisionConfig::Precision precision = xla::PrecisionConfig::DEFAULT);
} // namespace tensorflow
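// A minimal sketch of the PrecisionConfigProto -> PrecisionConfig rename,
// based on the usage visible in batch_dot.cc above; the header path and the
// helper name are assumptions made for illustration only.
#include "tensorflow/compiler/xla/xla_data.pb.h"

xla::PrecisionConfig HighestPrecisionForBinaryOp() {
  xla::PrecisionConfig config;
  // One operand_precision entry per operand, mirroring the BatchDot change.
  config.add_operand_precision(xla::PrecisionConfig::HIGHEST);
  config.add_operand_precision(xla::PrecisionConfig::HIGHEST);
  return config;
}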
diff --git a/tensorflow/compiler/tf2xla/lib/cholesky.cc b/tensorflow/compiler/tf2xla/lib/cholesky.cc
index c50a8de33e..ab3d0a5668 100644
--- a/tensorflow/compiler/tf2xla/lib/cholesky.cc
+++ b/tensorflow/compiler/tf2xla/lib/cholesky.cc
@@ -50,7 +50,7 @@ namespace {
// l[..., j, j]
// return l
xla::XlaOp CholeskyUnblocked(xla::XlaOp a,
- xla::PrecisionConfigProto::Precision precision) {
+ xla::PrecisionConfig::Precision precision) {
xla::XlaBuilder* builder = a.builder();
return builder->ReportErrorOrReturn([&]() -> xla::StatusOr<xla::XlaOp> {
TF_ASSIGN_OR_RETURN(xla::Shape a_shape, builder->GetShape(a));
@@ -150,7 +150,7 @@ xla::XlaOp CholeskyUnblocked(xla::XlaOp a,
} // namespace
xla::XlaOp Cholesky(xla::XlaOp a, int64 block_size,
- xla::PrecisionConfigProto::Precision precision) {
+ xla::PrecisionConfig::Precision precision) {
xla::XlaBuilder* builder = a.builder();
return builder->ReportErrorOrReturn([&]() -> xla::StatusOr<xla::XlaOp> {
TF_ASSIGN_OR_RETURN(xla::Shape a_shape, builder->GetShape(a));
diff --git a/tensorflow/compiler/tf2xla/lib/cholesky.h b/tensorflow/compiler/tf2xla/lib/cholesky.h
index 60cd7ded53..9a561c34b9 100644
--- a/tensorflow/compiler/tf2xla/lib/cholesky.h
+++ b/tensorflow/compiler/tf2xla/lib/cholesky.h
@@ -30,9 +30,9 @@ namespace tensorflow {
// TODO(phawkins): check for negative values on the diagonal and return an
// error, instead of silently yielding NaNs.
// TODO(znado): handle the complex Hermitian case
-xla::XlaOp Cholesky(xla::XlaOp a, int64 block_size = 256,
- xla::PrecisionConfigProto::Precision precision =
- xla::PrecisionConfigProto::HIGHEST);
+xla::XlaOp Cholesky(
+ xla::XlaOp a, int64 block_size = 256,
+ xla::PrecisionConfig::Precision precision = xla::PrecisionConfig::HIGHEST);
} // namespace tensorflow
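As with BatchDot, the Cholesky declaration changes only in the precision enum. A sketch of a caller that pins the precision explicitly (helper name hypothetical):

```cpp
#include "tensorflow/compiler/tf2xla/lib/cholesky.h"

// Sketch: factor a batch of SPD matrices with the default block size and the
// renamed precision enum.
xla::XlaOp LowerCholesky(xla::XlaOp a) {
  return tensorflow::Cholesky(a, /*block_size=*/256,
                              xla::PrecisionConfig::HIGHEST);
}
```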
diff --git a/tensorflow/compiler/tf2xla/lib/qr.cc b/tensorflow/compiler/tf2xla/lib/qr.cc
index 0a140fa93c..6b3f2b6e06 100644
--- a/tensorflow/compiler/tf2xla/lib/qr.cc
+++ b/tensorflow/compiler/tf2xla/lib/qr.cc
@@ -150,7 +150,7 @@ struct QRBlockResult {
xla::XlaOp vs; // Shape: [..., m, n]
};
xla::StatusOr<QRBlockResult> QRBlock(
- xla::XlaOp a, xla::PrecisionConfigProto::Precision precision) {
+ xla::XlaOp a, xla::PrecisionConfig::Precision precision) {
xla::XlaBuilder* builder = a.builder();
TF_ASSIGN_OR_RETURN(xla::Shape a_shape, builder->GetShape(a));
const int num_dims = xla::ShapeUtil::Rank(a_shape);
@@ -257,7 +257,7 @@ xla::StatusOr<QRBlockResult> QRBlock(
xla::StatusOr<xla::XlaOp> ComputeWYRepresentation(
xla::PrimitiveType type, absl::Span<const int64> batch_dims, xla::XlaOp vs,
xla::XlaOp taus, int64 m, int64 n,
- xla::PrecisionConfigProto::Precision precision) {
+ xla::PrecisionConfig::Precision precision) {
std::vector<int64> batch_dim_indices(batch_dims.size());
std::iota(batch_dim_indices.begin(), batch_dim_indices.end(), 0);
int64 n_index = batch_dims.size() + 1;
@@ -332,7 +332,7 @@ xla::StatusOr<xla::XlaOp> ComputeWYRepresentation(
// rather than WY transformations.
xla::StatusOr<QRDecompositionResult> QRDecomposition(
xla::XlaOp a, bool full_matrices, int64 block_size,
- xla::PrecisionConfigProto::Precision precision) {
+ xla::PrecisionConfig::Precision precision) {
xla::XlaBuilder* builder = a.builder();
TF_ASSIGN_OR_RETURN(xla::Shape a_shape, builder->GetShape(a));
const int num_dims = xla::ShapeUtil::Rank(a_shape);
diff --git a/tensorflow/compiler/tf2xla/lib/qr.h b/tensorflow/compiler/tf2xla/lib/qr.h
index 8a389fb7b0..24b537ac8b 100644
--- a/tensorflow/compiler/tf2xla/lib/qr.h
+++ b/tensorflow/compiler/tf2xla/lib/qr.h
@@ -35,8 +35,7 @@ struct QRDecompositionResult {
xla::StatusOr<QRDecompositionResult> QRDecomposition(
xla::XlaOp a, bool full_matrices, int64 block_size = 128,
- xla::PrecisionConfigProto::Precision precision =
- xla::PrecisionConfigProto::HIGHEST);
+ xla::PrecisionConfig::Precision precision = xla::PrecisionConfig::HIGHEST);
} // namespace tensorflow
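A sketch of how the reformatted QRDecomposition declaration is called; the wrapper forwards the StatusOr so builder errors propagate to the caller (helper name hypothetical):

```cpp
#include "tensorflow/compiler/tf2xla/lib/qr.h"

// Sketch: run the blocked QR decomposition at HIGHEST precision and let the
// caller unpack the returned QRDecompositionResult.
xla::StatusOr<tensorflow::QRDecompositionResult> FactorQR(xla::XlaOp a) {
  return tensorflow::QRDecomposition(a, /*full_matrices=*/true,
                                     /*block_size=*/128,
                                     xla::PrecisionConfig::HIGHEST);
}
```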
diff --git a/tensorflow/compiler/tf2xla/lib/triangular_solve.cc b/tensorflow/compiler/tf2xla/lib/triangular_solve.cc
index 37b2240b45..6524c2a9b1 100644
--- a/tensorflow/compiler/tf2xla/lib/triangular_solve.cc
+++ b/tensorflow/compiler/tf2xla/lib/triangular_solve.cc
@@ -110,9 +110,9 @@ xla::XlaOp DiagonalBlocks(xla::XlaOp a, int64 block_size) {
});
}
-xla::XlaOp InvertDiagonalBlocks(
- xla::XlaOp diag_blocks, bool lower, bool transpose_a, bool conjugate_a,
- xla::PrecisionConfigProto::Precision precision) {
+xla::XlaOp InvertDiagonalBlocks(xla::XlaOp diag_blocks, bool lower,
+ bool transpose_a, bool conjugate_a,
+ xla::PrecisionConfig::Precision precision) {
xla::XlaBuilder* builder = diag_blocks.builder();
return builder->ReportErrorOrReturn([&]() -> xla::StatusOr<xla::XlaOp> {
// Input is a batch of square lower triangular matrices. Its shape is
@@ -216,7 +216,7 @@ xla::XlaOp InvertDiagonalBlocks(
dnums.add_rhs_batch_dimensions(0);
dnums.add_lhs_contracting_dimensions(2);
dnums.add_rhs_contracting_dimensions(1);
- xla::PrecisionConfigProto precision_proto;
+ xla::PrecisionConfig precision_proto;
precision_proto.add_operand_precision(precision);
precision_proto.add_operand_precision(precision);
auto update = -DotGeneral(input_row, body_out, dnums, &precision_proto);
@@ -245,7 +245,7 @@ xla::XlaOp InvertDiagonalBlocks(
xla::XlaOp SolveWithInvertedDiagonalBlocks(
xla::XlaOp a, xla::XlaOp b, xla::XlaOp inv_diag_blocks, bool left_side,
bool lower, bool transpose_a, bool conjugate_a,
- xla::PrecisionConfigProto::Precision precision) {
+ xla::PrecisionConfig::Precision precision) {
xla::XlaBuilder* builder = a.builder();
return builder->ReportErrorOrReturn([&]() -> xla::StatusOr<xla::XlaOp> {
TF_ASSIGN_OR_RETURN(xla::Shape blocks_shape,
@@ -346,7 +346,7 @@ xla::XlaOp SolveWithInvertedDiagonalBlocks(
xla::XlaOp TriangularSolve(xla::XlaOp a, xla::XlaOp b, bool left_side,
bool lower, bool transpose_a, bool conjugate_a,
int64 block_size,
- xla::PrecisionConfigProto::Precision precision) {
+ xla::PrecisionConfig::Precision precision) {
xla::XlaBuilder* builder = a.builder();
return builder->ReportErrorOrReturn([&]() -> xla::StatusOr<xla::XlaOp> {
TF_ASSIGN_OR_RETURN(xla::Shape a_shape, builder->GetShape(a));
diff --git a/tensorflow/compiler/tf2xla/lib/triangular_solve.h b/tensorflow/compiler/tf2xla/lib/triangular_solve.h
index ac42a48352..2303234f36 100644
--- a/tensorflow/compiler/tf2xla/lib/triangular_solve.h
+++ b/tensorflow/compiler/tf2xla/lib/triangular_solve.h
@@ -57,11 +57,10 @@ namespace tensorflow {
//
// Uses a blocked algorithm if `block_size` is > 1; if block_size == 1 then no
// blocking is used.
-xla::XlaOp TriangularSolve(xla::XlaOp a, xla::XlaOp b, bool left_side,
- bool lower, bool transpose_a, bool conjugate_a,
- int64 block_size = 128,
- xla::PrecisionConfigProto::Precision precision =
- xla::PrecisionConfigProto::HIGHEST);
+xla::XlaOp TriangularSolve(
+ xla::XlaOp a, xla::XlaOp b, bool left_side, bool lower, bool transpose_a,
+ bool conjugate_a, int64 block_size = 128,
+ xla::PrecisionConfig::Precision precision = xla::PrecisionConfig::HIGHEST);
} // namespace tensorflow
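TriangularSolve keeps its signature apart from the precision type. A sketch of a left-sided solve of `a * x = b` for a lower-triangular `a` (helper name hypothetical):

```cpp
#include "tensorflow/compiler/tf2xla/lib/triangular_solve.h"

// Sketch: lower-triangular, non-transposed LHS with the defaults documented
// in the header, at HIGHEST operand precision.
xla::XlaOp SolveLower(xla::XlaOp a, xla::XlaOp b) {
  return tensorflow::TriangularSolve(a, b, /*left_side=*/true, /*lower=*/true,
                                     /*transpose_a=*/false,
                                     /*conjugate_a=*/false,
                                     /*block_size=*/128,
                                     xla::PrecisionConfig::HIGHEST);
}
```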
diff --git a/tensorflow/compiler/tf2xla/lib/while_loop.cc b/tensorflow/compiler/tf2xla/lib/while_loop.cc
index 5300e2c878..594ab1dfd0 100644
--- a/tensorflow/compiler/tf2xla/lib/while_loop.cc
+++ b/tensorflow/compiler/tf2xla/lib/while_loop.cc
@@ -24,7 +24,7 @@ namespace tensorflow {
xla::StatusOr<std::vector<xla::XlaOp>> XlaWhileLoop(
const LoopConditionFunction& condition_function,
const LoopBodyFunction& body_function,
- absl::Span<const xla::XlaOp> initial_values, StringPiece name,
+ absl::Span<const xla::XlaOp> initial_values, absl::string_view name,
xla::XlaBuilder* builder) {
int arity = initial_values.size();
std::vector<xla::Shape> var_shapes;
@@ -47,7 +47,7 @@ xla::StatusOr<std::vector<xla::XlaOp>> XlaWhileLoop(
// Build the condition.
std::unique_ptr<xla::XlaBuilder> cond_builder =
- builder->CreateSubBuilder(strings::StrCat(name, "_condition"));
+ builder->CreateSubBuilder(absl::StrCat(name, "_condition"));
{
auto parameter =
xla::Parameter(cond_builder.get(), 0, tuple_shape, "parameter");
@@ -61,7 +61,7 @@ xla::StatusOr<std::vector<xla::XlaOp>> XlaWhileLoop(
// Build the body.
std::unique_ptr<xla::XlaBuilder> body_builder =
- builder->CreateSubBuilder(strings::StrCat(name, "_body"));
+ builder->CreateSubBuilder(absl::StrCat(name, "_body"));
{
auto parameter =
xla::Parameter(body_builder.get(), 0, tuple_shape, "parameter");
@@ -84,7 +84,7 @@ xla::StatusOr<std::vector<xla::XlaOp>> XlaWhileLoop(
xla::StatusOr<std::vector<xla::XlaOp>> XlaForEachIndex(
int64 num_iterations, xla::PrimitiveType num_iterations_type,
const ForEachIndexBodyFunction& body_function,
- absl::Span<const xla::XlaOp> initial_values, StringPiece name,
+ absl::Span<const xla::XlaOp> initial_values, absl::string_view name,
xla::XlaBuilder* builder) {
auto while_cond_fn =
[&](absl::Span<const xla::XlaOp> values,
diff --git a/tensorflow/compiler/tf2xla/lib/while_loop.h b/tensorflow/compiler/tf2xla/lib/while_loop.h
index 115ebf390d..f2134bb449 100644
--- a/tensorflow/compiler/tf2xla/lib/while_loop.h
+++ b/tensorflow/compiler/tf2xla/lib/while_loop.h
@@ -19,11 +19,11 @@ limitations under the License.
#include <functional>
#include <vector>
+#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "tensorflow/compiler/xla/client/xla_builder.h"
#include "tensorflow/compiler/xla/client/xla_computation.h"
#include "tensorflow/compiler/xla/statusor.h"
-#include "tensorflow/core/lib/core/stringpiece.h"
namespace tensorflow {
@@ -50,7 +50,7 @@ typedef std::function<xla::StatusOr<std::vector<xla::XlaOp>>(
xla::StatusOr<std::vector<xla::XlaOp>> XlaWhileLoop(
const LoopConditionFunction& condition_function,
const LoopBodyFunction& body_function,
- absl::Span<const xla::XlaOp> initial_values, StringPiece name,
+ absl::Span<const xla::XlaOp> initial_values, absl::string_view name,
xla::XlaBuilder* builder);
// Builds an XLA loop that repeats a computation `num_iterations` times.
@@ -65,7 +65,7 @@ typedef std::function<xla::StatusOr<std::vector<xla::XlaOp>>(
xla::StatusOr<std::vector<xla::XlaOp>> XlaForEachIndex(
int64 num_iterations, xla::PrimitiveType num_iterations_type,
const ForEachIndexBodyFunction& body_function,
- absl::Span<const xla::XlaOp> initial_values, StringPiece name,
+ absl::Span<const xla::XlaOp> initial_values, absl::string_view name,
xla::XlaBuilder* builder);
} // namespace tensorflow
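Because string literals convert to absl::string_view just as they did to StringPiece, existing callers of the loop helpers compile unchanged. A sketch of XlaForEachIndex, assuming the body callback takes (index, loop-carried values, builder) as in the ForEachIndexBodyFunction typedef referenced above:

```cpp
#include "tensorflow/compiler/tf2xla/lib/while_loop.h"

// Sketch: accumulate the loop index ten times; "sum_indices" is passed as the
// absl::string_view `name` argument.
xla::StatusOr<std::vector<xla::XlaOp>> SumIndices(xla::XlaBuilder* builder) {
  auto body = [](xla::XlaOp index, absl::Span<const xla::XlaOp> values,
                 xla::XlaBuilder* b) -> xla::StatusOr<std::vector<xla::XlaOp>> {
    return std::vector<xla::XlaOp>{xla::Add(values[0], index)};
  };
  xla::XlaOp zero = xla::ConstantR0<int32_t>(builder, 0);
  return tensorflow::XlaForEachIndex(/*num_iterations=*/10, xla::S32, body,
                                     {zero}, "sum_indices", builder);
}
```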
diff --git a/tensorflow/compiler/tf2xla/ops/xla_ops.cc b/tensorflow/compiler/tf2xla/ops/xla_ops.cc
index 2cd9ae799f..68cfdc1785 100644
--- a/tensorflow/compiler/tf2xla/ops/xla_ops.cc
+++ b/tensorflow/compiler/tf2xla/ops/xla_ops.cc
@@ -83,7 +83,7 @@ lhs_dilation: dilation to apply between input elements
rhs_dilation: dilation to apply between kernel elements
feature_group_count: number of feature groups for grouped convolution.
dimension_numbers: a serialized xla::ConvolutionDimensionNumbers proto.
-precision_config: a serialized xla::PrecisionConfigProto proto.
+precision_config: a serialized xla::PrecisionConfig proto.
)doc");
REGISTER_OP("XlaDot")
@@ -102,7 +102,7 @@ Wraps the XLA ConvGeneralDilated operator, documented at
lhs: the LHS tensor
rhs: the RHS tensor
dimension_numbers: a serialized xla::DotDimensionNumbers proto.
-precision_config: a serialized xla::PrecisionConfigProto proto.
+precision_config: a serialized xla::PrecisionConfig proto.
)doc");
REGISTER_OP("XlaDynamicUpdateSlice")
diff --git a/tensorflow/compiler/tf2xla/resource_operation_table.cc b/tensorflow/compiler/tf2xla/resource_operation_table.cc
index 32ba6df2e6..20f2ce2919 100644
--- a/tensorflow/compiler/tf2xla/resource_operation_table.cc
+++ b/tensorflow/compiler/tf2xla/resource_operation_table.cc
@@ -18,7 +18,7 @@ limitations under the License.
#include "tensorflow/core/lib/gtl/flatmap.h"
namespace tensorflow {
-/*static*/ StringPiece XlaResourceOpInfo::XlaResourceOpKindToString(
+/*static*/ absl::string_view XlaResourceOpInfo::XlaResourceOpKindToString(
XlaResourceOpKind op_kind) {
switch (op_kind) {
case XlaResourceOpKind::kRead:
@@ -30,11 +30,11 @@ namespace tensorflow {
}
}
-static gtl::FlatMap<StringPiece, XlaResourceOpInfo>* CreateResourceOpInfoMap() {
- gtl::FlatMap<StringPiece, XlaResourceOpInfo>* result =
- new gtl::FlatMap<StringPiece, XlaResourceOpInfo>;
+static gtl::FlatMap<absl::string_view, XlaResourceOpInfo>*
+CreateResourceOpInfoMap() {
+ auto* result = new gtl::FlatMap<absl::string_view, XlaResourceOpInfo>;
- auto add = [&](StringPiece op, XlaResourceOpKind op_kind,
+ auto add = [&](absl::string_view op, XlaResourceOpKind op_kind,
XlaResourceKind resource_kind) {
auto insert_result =
result->insert({op, XlaResourceOpInfo(op_kind, resource_kind)});
@@ -103,23 +103,23 @@ static gtl::FlatMap<StringPiece, XlaResourceOpInfo>* CreateResourceOpInfoMap() {
return result;
}
-static const gtl::FlatMap<StringPiece, XlaResourceOpInfo>&
+static const gtl::FlatMap<absl::string_view, XlaResourceOpInfo>&
GetStaticResourceOpInfoMap() {
- static gtl::FlatMap<StringPiece, XlaResourceOpInfo>* op_info_map =
+ static gtl::FlatMap<absl::string_view, XlaResourceOpInfo>* op_info_map =
CreateResourceOpInfoMap();
return *op_info_map;
}
-const XlaResourceOpInfo* GetResourceOpInfoForOp(StringPiece op) {
- const gtl::FlatMap<StringPiece, XlaResourceOpInfo>& op_infos =
+const XlaResourceOpInfo* GetResourceOpInfoForOp(absl::string_view op) {
+ const gtl::FlatMap<absl::string_view, XlaResourceOpInfo>& op_infos =
GetStaticResourceOpInfoMap();
auto it = op_infos.find(op);
return it == op_infos.end() ? nullptr : &it->second;
}
namespace resource_op_table_internal {
-std::vector<StringPiece> GetKnownResourceOps() {
- std::vector<StringPiece> result;
+std::vector<absl::string_view> GetKnownResourceOps() {
+ std::vector<absl::string_view> result;
for (const auto& p : GetStaticResourceOpInfoMap()) {
result.push_back(p.first);
}
diff --git a/tensorflow/compiler/tf2xla/resource_operation_table.h b/tensorflow/compiler/tf2xla/resource_operation_table.h
index 7f627a64c6..61c7a56ff0 100644
--- a/tensorflow/compiler/tf2xla/resource_operation_table.h
+++ b/tensorflow/compiler/tf2xla/resource_operation_table.h
@@ -19,7 +19,7 @@ limitations under the License.
#include <string>
#include <vector>
-#include "tensorflow/core/lib/core/stringpiece.h"
+#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/logging.h"
// Exposes information about the resource operations supported by tf2xla in a
@@ -47,7 +47,7 @@ class XlaResourceOpInfo {
XlaResourceOpKind kind() const { return op_kind_; }
XlaResourceKind resource_kind() const { return resource_kind_; }
- static StringPiece XlaResourceOpKindToString(XlaResourceOpKind op_kind);
+ static absl::string_view XlaResourceOpKindToString(XlaResourceOpKind op_kind);
private:
XlaResourceOpKind op_kind_;
@@ -57,13 +57,13 @@ class XlaResourceOpInfo {
// Returns a XlaResourceOpInfo describing `op` if it is a resource operation
// supported by tf2xla, otherwise returns null (i.e. if this returns null then
// `op` is either not a resource operation or is unsupported by XLA).
-const XlaResourceOpInfo* GetResourceOpInfoForOp(StringPiece op);
+const XlaResourceOpInfo* GetResourceOpInfoForOp(absl::string_view op);
namespace resource_op_table_internal {
// NB! Implementation detail exposed for unit testing, do not use.
//
// Returns the set of resource operations known by this module.
-std::vector<StringPiece> GetKnownResourceOps();
+std::vector<absl::string_view> GetKnownResourceOps();
} // namespace resource_op_table_internal
} // namespace tensorflow
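A lookup sketch against the table using the new absl::string_view key type, assuming the XlaResourceOpKind enum has a kWrite member alongside the kRead case shown above (helper name hypothetical):

```cpp
#include "tensorflow/compiler/tf2xla/resource_operation_table.h"

// Sketch: a null result means `op_name` is either not a resource op or not
// supported by tf2xla, per the header comment.
bool IsResourceWrite(absl::string_view op_name) {
  const tensorflow::XlaResourceOpInfo* info =
      tensorflow::GetResourceOpInfoForOp(op_name);
  return info != nullptr &&
         info->kind() == tensorflow::XlaResourceOpKind::kWrite;
}
```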
diff --git a/tensorflow/compiler/tf2xla/resource_operation_table_test.cc b/tensorflow/compiler/tf2xla/resource_operation_table_test.cc
index 0343f80de9..a85ef040a7 100644
--- a/tensorflow/compiler/tf2xla/resource_operation_table_test.cc
+++ b/tensorflow/compiler/tf2xla/resource_operation_table_test.cc
@@ -34,7 +34,7 @@ bool HasResourceInputOrOutput(const OpDef& op_def) {
TEST(ResourceOperationTableTest, HaveAllResourceOps) {
gtl::FlatMap<string, bool> known_resource_ops;
- for (StringPiece known_resource_op :
+ for (absl::string_view known_resource_op :
resource_op_table_internal::GetKnownResourceOps()) {
ASSERT_TRUE(
known_resource_ops.insert({string(known_resource_op), false}).second);
diff --git a/tensorflow/compiler/tf2xla/sharding_util.cc b/tensorflow/compiler/tf2xla/sharding_util.cc
index 2d7eb8b915..8aae498be1 100644
--- a/tensorflow/compiler/tf2xla/sharding_util.cc
+++ b/tensorflow/compiler/tf2xla/sharding_util.cc
@@ -17,7 +17,6 @@ limitations under the License.
#include "absl/strings/match.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/lib/core/errors.h"
-#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
diff --git a/tensorflow/compiler/tf2xla/tf2xla.cc b/tensorflow/compiler/tf2xla/tf2xla.cc
index f34af2d67d..7dbe3a0b58 100644
--- a/tensorflow/compiler/tf2xla/tf2xla.cc
+++ b/tensorflow/compiler/tf2xla/tf2xla.cc
@@ -22,6 +22,7 @@ limitations under the License.
#include <utility>
#include <vector>
+#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "tensorflow/compiler/tf2xla/dump_graph.h"
#include "tensorflow/compiler/tf2xla/shape_util.h"
@@ -41,7 +42,6 @@ limitations under the License.
#include "tensorflow/core/graph/graph_constructor.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/errors.h"
-#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
@@ -75,7 +75,7 @@ Status AddArgNodes(Graph* graph, const NodeMap& node_map,
auto node_it = node_map.find(remap_it->second);
if (node_it == node_map.end()) {
// Strip off the aot_feed_#/ prefix.
- StringPiece name(remap_it->second);
+ absl::string_view name(remap_it->second);
const auto index = name.find('/');
if (index > 0) name.remove_prefix(index + 1);
return errors::InvalidArgument(
@@ -89,7 +89,7 @@ Status AddArgNodes(Graph* graph, const NodeMap& node_map,
// explicitly specify or override them.
Node* arg_node = nullptr;
TF_RETURN_IF_ERROR(
- NodeBuilder(strings::StrCat("_arg_", arg_index), kArgOp)
+ NodeBuilder(absl::StrCat("_arg_", arg_index), kArgOp)
.Attr("T", BaseType(feed_node->output_type(output_index)))
.Attr("index", arg_index)
.Attr(kFeedIdAttr, TensorIdToString(feed.id()))
@@ -136,7 +136,7 @@ Status AddRetvalNodes(Graph* graph, const NodeMap& node_map,
// Connects fetch_node -> retval_node.
Node* retval_node = nullptr;
TF_RETURN_IF_ERROR(
- NodeBuilder(strings::StrCat("_retval_", ret_index), kRetvalOp)
+ NodeBuilder(absl::StrCat("_retval_", ret_index), kRetvalOp)
.Input(fetch_node, id.output_index())
.Attr("T", BaseType(fetch_node->output_type(id.output_index())))
.Attr("index", ret_index)
@@ -256,7 +256,7 @@ Status ConvertGraphToXla(std::unique_ptr<Graph> graph, xla::Client* client,
XlaOpRegistry::RegisterCompilationKernels();
for (Node* node : graph->nodes()) {
node->set_assigned_device_name(
- strings::StrCat("/device:", DEVICE_CPU_XLA_JIT));
+ absl::StrCat("/device:", DEVICE_CPU_XLA_JIT));
}
std::vector<XlaCompiler::Argument> xla_args;
TF_RETURN_IF_ERROR(CreateXlaArgs(*graph, &xla_args));
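The prefix-stripping hunk above keeps the same find/remove_prefix idiom, since absl::string_view offers the operations StringPiece did. A standalone sketch of that idiom (function name hypothetical):

```cpp
#include <string>

#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"

// Sketch: drop everything up to and including the first '/' and rebuild a
// message with absl::StrCat, mirroring the aot_feed_#/ handling above.
std::string StripFeedPrefix(absl::string_view name) {
  const auto slash = name.find('/');
  if (slash != absl::string_view::npos) name.remove_prefix(slash + 1);
  return absl::StrCat("feed name: ", name);
}
```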
diff --git a/tensorflow/compiler/tf2xla/tf2xla_util.cc b/tensorflow/compiler/tf2xla/tf2xla_util.cc
index e284e0b191..211caf8736 100644
--- a/tensorflow/compiler/tf2xla/tf2xla_util.cc
+++ b/tensorflow/compiler/tf2xla/tf2xla_util.cc
@@ -20,6 +20,7 @@ limitations under the License.
#include <set>
#include <unordered_map>
+#include "absl/strings/str_cat.h"
#include "absl/types/optional.h"
#include "tensorflow/compiler/tf2xla/sharding_util.h"
#include "tensorflow/compiler/tf2xla/tf2xla.pb.h"
@@ -33,7 +34,6 @@ limitations under the License.
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
-#include "tensorflow/core/lib/strings/strcat.h"
namespace tensorflow {
@@ -112,8 +112,8 @@ Status AddPlaceholdersForFeeds(
const string name_port = TensorIdToString(feed->id());
PlaceholderInfo& info = placeholder_info[name_port];
info.feed = feed;
- info.placeholder_name = strings::StrCat(
- "aot_feed_", feed->id().output_index(), "/", feed->id().node_name());
+ info.placeholder_name = absl::StrCat("aot_feed_", feed->id().output_index(),
+ "/", feed->id().node_name());
(*feed_remapping)[name_port] = info.placeholder_name;
}
@@ -258,7 +258,7 @@ Status PruneGraphDefInto(const tf2xla::Config& config, const GraphDef& in,
}
string TensorIdToString(const tf2xla::TensorId& id) {
- return strings::StrCat(id.node_name(), ":", id.output_index());
+ return absl::StrCat(id.node_name(), ":", id.output_index());
}
Status SetNodeShardingFromNeighbors(Node* n, bool out_edges) {
@@ -289,7 +289,7 @@ Status SetNodeShardingFromNeighbors(Node* n, bool out_edges) {
return Status::OK();
}
-void AddDtypeToKernalDefConstraint(StringPiece name, DataType dtype,
+void AddDtypeToKernalDefConstraint(absl::string_view name, DataType dtype,
KernelDef* kdef) {
for (KernelDef::AttrConstraint& constraint : *kdef->mutable_constraint()) {
if (constraint.name() == name) {
diff --git a/tensorflow/compiler/tf2xla/tf2xla_util.h b/tensorflow/compiler/tf2xla/tf2xla_util.h
index 33620ef810..a29e764466 100644
--- a/tensorflow/compiler/tf2xla/tf2xla_util.h
+++ b/tensorflow/compiler/tf2xla/tf2xla_util.h
@@ -53,7 +53,7 @@ string TensorIdToString(const tf2xla::TensorId& id);
Status SetNodeShardingFromNeighbors(Node* n, bool out_edges);
// Add an allowed data type to the AttrConstraint with the given name.
-void AddDtypeToKernalDefConstraint(StringPiece name, DataType dtype,
+void AddDtypeToKernalDefConstraint(absl::string_view name, DataType dtype,
KernelDef* kdef);
// Returns the next random seed to use for seeding xla rng.
diff --git a/tensorflow/compiler/tf2xla/tf2xla_util_test.cc b/tensorflow/compiler/tf2xla/tf2xla_util_test.cc
index 2b1f724dc7..68441b3d47 100644
--- a/tensorflow/compiler/tf2xla/tf2xla_util_test.cc
+++ b/tensorflow/compiler/tf2xla/tf2xla_util_test.cc
@@ -16,6 +16,8 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include "absl/strings/match.h"
+#include "absl/strings/str_cat.h"
+#include "absl/strings/string_view.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/data_flow_ops.h"
#include "tensorflow/cc/ops/function_ops.h"
@@ -25,8 +27,6 @@ limitations under the License.
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
-#include "tensorflow/core/lib/core/stringpiece.h"
-#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
@@ -153,7 +153,7 @@ static tf2xla::Config FetchesConfig(std::vector<string> fetches) {
tf2xla::Config config;
for (const auto& fetch_node_name : fetches) {
auto* fetch = config.add_fetch();
- fetch->set_name(strings::StrCat("fetch_", fetch_node_name));
+ fetch->set_name(absl::StrCat("fetch_", fetch_node_name));
fetch->mutable_id()->set_node_name(fetch_node_name);
}
return config;
diff --git a/tensorflow/compiler/tf2xla/xla_compilation_device.cc b/tensorflow/compiler/tf2xla/xla_compilation_device.cc
index d98237bd5c..7f860500c7 100644
--- a/tensorflow/compiler/tf2xla/xla_compilation_device.cc
+++ b/tensorflow/compiler/tf2xla/xla_compilation_device.cc
@@ -76,12 +76,11 @@ class XlaCompilationAllocator : public Allocator {
XlaCompilationDevice::XlaCompilationDevice(const SessionOptions& options,
DeviceType type)
- : LocalDevice(
- options,
- Device::BuildDeviceAttributes(
- strings::StrCat("/device:", type.type(), ":0"), type,
- Bytes(256 << 20), DeviceLocality(),
- strings::StrCat("device: XLA compilation device ", type.type()))),
+ : LocalDevice(options, Device::BuildDeviceAttributes(
+ absl::StrCat("/device:", type.type(), ":0"),
+ type, Bytes(256 << 20), DeviceLocality(),
+ absl::StrCat("device: XLA compilation device ",
+ type.type()))),
allocator_(new XlaCompilationAllocator()) {}
XlaCompilationDevice::~XlaCompilationDevice() {}
diff --git a/tensorflow/compiler/tf2xla/xla_compiler.cc b/tensorflow/compiler/tf2xla/xla_compiler.cc
index 0c300c282e..41d305d461 100644
--- a/tensorflow/compiler/tf2xla/xla_compiler.cc
+++ b/tensorflow/compiler/tf2xla/xla_compiler.cc
@@ -198,14 +198,14 @@ Status XlaCompiler::CompileFunction(const XlaCompiler::CompileOptions& options,
// lowest-numbered core that consumes the argument. We choose the
// lowest-numbered core so the assignment is deterministic.
for (Node* n : graph->nodes()) {
- if (StringPiece(n->type_string()) == "_Arg") {
+ if (absl::string_view(n->type_string()) == "_Arg") {
TF_RETURN_IF_ERROR(SetNodeShardingFromNeighbors(n, /*out_edges=*/true));
}
}
// Do _Retval as a second loop, in case the retval's input is an _Arg (which
// may have gotten a device assignment from the first loop).
for (Node* n : graph->nodes()) {
- if (StringPiece(n->type_string()) == "_Retval") {
+ if (absl::string_view(n->type_string()) == "_Retval") {
TF_RETURN_IF_ERROR(SetNodeShardingFromNeighbors(n, /*out_edges=*/false));
}
}
@@ -213,8 +213,7 @@ Status XlaCompiler::CompileFunction(const XlaCompiler::CompileOptions& options,
if (VLOG_IS_ON(2)) {
VLOG(2) << "XlaCompiler::CompileFunction: "
<< dump_graph::DumpGraphToFile(
- strings::StrCat("xla_compile_function_", function_id),
- *graph);
+ absl::StrCat("xla_compile_function_", function_id), *graph);
}
VLOG(1) << "====================================================";
@@ -522,7 +521,7 @@ Status XlaCompiler::BuildArguments(
// Use the _Arg nodes in the graph to resolve core assignments.
for (const Node* n : graph.nodes()) {
- if (StringPiece(n->type_string()) != "_Arg") continue;
+ if (absl::string_view(n->type_string()) != "_Arg") continue;
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index));
TF_RET_CHECK(index >= 0 && index < args.size())
@@ -581,7 +580,7 @@ Status XlaCompiler::BuildArguments(
builder, core == -1 ? absl::optional<xla::OpSharding>()
: xla::sharding_builder::AssignDevice(core));
arg_handles[i] = xla::Parameter(builder, i, (*input_shapes)[i],
- strings::StrCat("arg", i));
+ absl::StrCat("arg", i));
}
}
@@ -644,7 +643,7 @@ Status XlaCompiler::CompileSingleOp(
// dependency edge to the _SOURCE node.
for (int64 i = 0; i < ctx->num_inputs(); ++i) {
Node* node;
- string name = strings::StrCat(ctx->op_kernel().name(), "_", i, "_arg");
+ string name = absl::StrCat(ctx->op_kernel().name(), "_", i, "_arg");
Status status = NodeBuilder(name, "_Arg")
.ControlInput(graph->source_node())
.Attr("T", ctx->input_dtype(i))
@@ -657,7 +656,7 @@ Status XlaCompiler::CompileSingleOp(
// Similarly with return values, create dummy _Retval nodes fed by `node`.
for (int64 i = 0; i < ctx->num_outputs(); ++i) {
Node* node;
- string name = strings::StrCat(ctx->op_kernel().name(), "_", i, "_retval");
+ string name = absl::StrCat(ctx->op_kernel().name(), "_", i, "_retval");
Status status = NodeBuilder(name, "_Retval")
.Input(main_node, i)
.Attr("T", ctx->expected_output_dtype(i))
@@ -693,7 +692,7 @@ Status ValidateGraph(const Graph* graph,
const DeviceType& device_type, const string& name) {
auto maybe_error = [&](const Node* node, const Status& s) -> Status {
if (!s.ok()) {
- return errors::InvalidArgument(strings::StrCat(
+ return errors::InvalidArgument(absl::StrCat(
"Detected unsupported operations when trying to compile graph ", name,
" on ", device_type.type_string(), ": ", node->def().op(), " (",
s.error_message(), ")", FormatNodeForError(*node)));
@@ -734,7 +733,7 @@ Status XlaCompiler::CompileGraph(const XlaCompiler::CompileOptions& options,
if (VLOG_IS_ON(2)) {
VLOG(2) << "XlaCompiler::CompileGraph: "
<< dump_graph::DumpGraphToFile(
- strings::StrCat("xla_compile_graph_", name), *graph);
+ absl::StrCat("xla_compile_graph_", name), *graph);
}
// Report the error here if initialization failed.
diff --git a/tensorflow/compiler/tf2xla/xla_context.cc b/tensorflow/compiler/tf2xla/xla_context.cc
index 24a4b92b45..e8b4b0eb36 100644
--- a/tensorflow/compiler/tf2xla/xla_context.cc
+++ b/tensorflow/compiler/tf2xla/xla_context.cc
@@ -32,7 +32,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
-#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
diff --git a/tensorflow/compiler/tf2xla/xla_op_kernel.cc b/tensorflow/compiler/tf2xla/xla_op_kernel.cc
index 1499c99ed1..d67e50375b 100644
--- a/tensorflow/compiler/tf2xla/xla_op_kernel.cc
+++ b/tensorflow/compiler/tf2xla/xla_op_kernel.cc
@@ -67,7 +67,7 @@ const xla::XlaOp& XlaOpKernelContext::Input(int index) {
return GetComputationFromTensor(context_->input(index));
}
-const xla::XlaOp& XlaOpKernelContext::Input(StringPiece name) {
+const xla::XlaOp& XlaOpKernelContext::Input(absl::string_view name) {
return GetComputationFromTensor(GetInputTensorByName(name));
}
@@ -75,7 +75,7 @@ TensorShape XlaOpKernelContext::InputShape(int index) {
return context_->input(index).shape();
}
-TensorShape XlaOpKernelContext::InputShape(StringPiece name) {
+TensorShape XlaOpKernelContext::InputShape(absl::string_view name) {
return GetInputTensorByName(name).shape();
}
@@ -100,7 +100,7 @@ Status XlaOpKernelContext::ConstantInput(int index,
}
static xla::StatusOr<int> InputIndex(XlaOpKernelContext* context,
- StringPiece name) {
+ absl::string_view name) {
int start, stop;
TF_RETURN_IF_ERROR(context->op_kernel().InputRange(name, &start, &stop));
if (stop != start + 1) {
@@ -112,7 +112,7 @@ static xla::StatusOr<int> InputIndex(XlaOpKernelContext* context,
return start;
}
-Status XlaOpKernelContext::ConstantInput(StringPiece name,
+Status XlaOpKernelContext::ConstantInput(absl::string_view name,
xla::Literal* constant_literal) {
TF_ASSIGN_OR_RETURN(int index, InputIndex(this, name));
return ConstantInput(index, constant_literal);
@@ -265,7 +265,7 @@ Status XlaOpKernelContext::ConstantInputAsIntScalar(int index, int64* out) {
return LiteralToInt64Scalar(literal, out);
}
-Status XlaOpKernelContext::ConstantInputAsIntScalar(StringPiece name,
+Status XlaOpKernelContext::ConstantInputAsIntScalar(absl::string_view name,
int64* out) {
TF_ASSIGN_OR_RETURN(int index, InputIndex(this, name));
return ConstantInputAsIntScalar(index, out);
@@ -305,7 +305,7 @@ Status XlaOpKernelContext::ConstantInputAsIntVector(int index,
return LiteralToInt64Vector(literal, out);
}
-Status XlaOpKernelContext::ConstantInputAsIntVector(StringPiece name,
+Status XlaOpKernelContext::ConstantInputAsIntVector(absl::string_view name,
std::vector<int64>* out) {
TF_ASSIGN_OR_RETURN(int index, InputIndex(this, name));
return ConstantInputAsIntVector(index, out);
@@ -344,7 +344,7 @@ Status XlaOpKernelContext::ConstantInputAsInt64Literal(int index,
}
}
-Status XlaOpKernelContext::ConstantInputAsInt64Literal(StringPiece name,
+Status XlaOpKernelContext::ConstantInputAsInt64Literal(absl::string_view name,
xla::Literal* out) {
TF_ASSIGN_OR_RETURN(int index, InputIndex(this, name));
return ConstantInputAsInt64Literal(index, out);
@@ -361,7 +361,7 @@ Status XlaOpKernelContext::ConstantInputAsShape(int index, TensorShape* shape) {
return Status::OK();
}
-Status XlaOpKernelContext::InputList(StringPiece name,
+Status XlaOpKernelContext::InputList(absl::string_view name,
std::vector<xla::XlaOp>* handles,
std::vector<TensorShape>* shapes) {
OpInputList inputs;
@@ -376,7 +376,7 @@ Status XlaOpKernelContext::InputList(StringPiece name,
}
Status XlaOpKernelContext::ConstantInputList(
- StringPiece name, std::vector<xla::Literal>* outputs) {
+ absl::string_view name, std::vector<xla::Literal>* outputs) {
int start, stop;
TF_RETURN_IF_ERROR(op_kernel().InputRange(name, &start, &stop));
outputs->resize(stop - start);
@@ -429,8 +429,8 @@ Status XlaOpKernelContext::ReadVariableInput(int index, DataType type,
value);
}
-Status XlaOpKernelContext::ReadVariableInput(StringPiece name, DataType type,
- TensorShape* shape,
+Status XlaOpKernelContext::ReadVariableInput(absl::string_view name,
+ DataType type, TensorShape* shape,
xla::XlaOp* value) {
return ReadVariableInputTensor(GetInputTensorByName(name), type, context_,
shape, value);
@@ -564,7 +564,7 @@ Status XlaOpKernelContext::AssignVariable(int input_index, DataType type,
handle, builder());
}
-Status XlaOpKernelContext::AssignVariable(StringPiece name, DataType type,
+Status XlaOpKernelContext::AssignVariable(absl::string_view name, DataType type,
xla::XlaOp handle) {
TF_RET_CHECK(handle.valid());
return AssignVariableTensor(GetInputTensorByName(name), type, context_,
@@ -610,7 +610,7 @@ const xla::XlaComputation* XlaOpKernelContext::GetOrCreateMul(
return XlaContext::Get(context_).GetOrCreateMul(type);
}
-const Tensor& XlaOpKernelContext::GetInputTensorByName(StringPiece name) {
+const Tensor& XlaOpKernelContext::GetInputTensorByName(absl::string_view name) {
const Tensor* tensor;
CHECK(context_->input(name, &tensor).ok());
return *tensor;
diff --git a/tensorflow/compiler/tf2xla/xla_op_kernel.h b/tensorflow/compiler/tf2xla/xla_op_kernel.h
index 45cfa7da74..962c86d3a5 100644
--- a/tensorflow/compiler/tf2xla/xla_op_kernel.h
+++ b/tensorflow/compiler/tf2xla/xla_op_kernel.h
@@ -80,14 +80,14 @@ class XlaOpKernelContext {
TensorShape InputShape(int index);
// Returns the shape of input `name`.
- TensorShape InputShape(StringPiece name);
+ TensorShape InputShape(absl::string_view name);
// Returns input `index` as a XlaOp. Unlike
// OpKernelContext::Input returns a symbolic value rather than a concrete
// Tensor.
const xla::XlaOp& Input(int index);
// Returns input `name` as a XlaOp.
- const xla::XlaOp& Input(StringPiece name);
+ const xla::XlaOp& Input(absl::string_view name);
// Returns true if all inputs are the same shape, otherwise sets the
// status to a non-OK value and returns false.
@@ -97,7 +97,7 @@ class XlaOpKernelContext {
// Returns the named list-valued immutable input in "list", as
// defined in the OpDef. If the named output is not list-valued,
// returns a one-element list.
- Status InputList(StringPiece name, std::vector<xla::XlaOp>* handles,
+ Status InputList(absl::string_view name, std::vector<xla::XlaOp>* handles,
std::vector<TensorShape>* shapes);
// Helper methods for constant inputs.
@@ -106,7 +106,7 @@ class XlaOpKernelContext {
// expression cannot be evaluated, e.g., because it depends on unbound
// parameters, returns a non-OK status.
Status ConstantInput(int index, xla::Literal* constant_literal);
- Status ConstantInput(StringPiece name, xla::Literal* constant_literal);
+ Status ConstantInput(absl::string_view name, xla::Literal* constant_literal);
// Evaluates input `index`, reshapes it to `new_shape` if new_shape !=
// InputShape(index), and stores it in `*constant_literal`. If the input
@@ -118,14 +118,15 @@ class XlaOpKernelContext {
// Converts a constant scalar int32 or int64 tensor into an int64.
Status ConstantInputAsIntScalar(int index, int64* out);
- Status ConstantInputAsIntScalar(StringPiece name, int64* out);
+ Status ConstantInputAsIntScalar(absl::string_view name, int64* out);
// Converts a constant scalar float32 or float64 tensor into a float64.
Status ConstantInputAsFloatScalar(int index, double* out);
// Converts a constant 1D int32 or int64 tensor into a vector of int64s.
Status ConstantInputAsIntVector(int index, std::vector<int64>* out);
- Status ConstantInputAsIntVector(StringPiece name, std::vector<int64>* out);
+ Status ConstantInputAsIntVector(absl::string_view name,
+ std::vector<int64>* out);
// Reshapes and converts a constant int32 or int64 tensor into a vector of
// int64s.
@@ -133,7 +134,7 @@ class XlaOpKernelContext {
// Converts a constant int32 or int64 Tensor into an xla int64 Literal.
Status ConstantInputAsInt64Literal(int index, xla::Literal* out);
- Status ConstantInputAsInt64Literal(StringPiece name, xla::Literal* out);
+ Status ConstantInputAsInt64Literal(absl::string_view name, xla::Literal* out);
// Converts a constant 1D int32 or int64 tensor into a TensorShape.
Status ConstantInputAsShape(int index, TensorShape* shape);
@@ -141,7 +142,7 @@ class XlaOpKernelContext {
// Returns the named list-valued immutable input in "list", as
// defined in the OpDef. If the named output is not list-valued,
// returns a one-element list.
- Status ConstantInputList(StringPiece name,
+ Status ConstantInputList(absl::string_view name,
std::vector<xla::Literal>* literals);
// Outputs
@@ -190,8 +191,8 @@ class XlaOpKernelContext {
xla::XlaOp* value);
// Reads the current value of the resource variable referred to by input
// `name`.
- Status ReadVariableInput(StringPiece name, DataType type, TensorShape* shape,
- xla::XlaOp* value);
+ Status ReadVariableInput(absl::string_view name, DataType type,
+ TensorShape* shape, xla::XlaOp* value);
// Assigns the value `handle` to the variable referenced by input
// `input_index`. The variable must be of `type`. Returns an error if the
@@ -199,7 +200,8 @@ class XlaOpKernelContext {
// different shape.
Status AssignVariable(int input_index, DataType type, xla::XlaOp handle);
// Assigns the value `handle` to the variable referenced by input `name`.
- Status AssignVariable(StringPiece name, DataType type, xla::XlaOp handle);
+ Status AssignVariable(absl::string_view name, DataType type,
+ xla::XlaOp handle);
// Helper routines for the OP_REQUIRES macros
void CtxFailure(const Status& s);
@@ -248,7 +250,7 @@ class XlaOpKernelContext {
private:
// Returns the tensor of input `name`.
- const Tensor& GetInputTensorByName(StringPiece name);
+ const Tensor& GetInputTensorByName(absl::string_view name);
OpKernelContext* const context_;
};
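Most of the header changes above are mechanical StringPiece-to-absl::string_view swaps on the name-based accessors. A sketch of a kernel body exercising two of them; the input names "size" and "x" and the helper itself are hypothetical:

```cpp
#include <vector>

#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"

// Sketch: read a compile-time constant vector by name, fetch a symbolic input
// by name, and emit a reshape.
void CompileExample(tensorflow::XlaOpKernelContext* ctx) {
  std::vector<tensorflow::int64> size;
  OP_REQUIRES_OK(ctx, ctx->ConstantInputAsIntVector("size", &size));
  xla::XlaOp x = ctx->Input("x");
  ctx->SetOutput(0, xla::Reshape(x, size));
}
```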
diff --git a/tensorflow/compiler/tf2xla/xla_op_registry.cc b/tensorflow/compiler/tf2xla/xla_op_registry.cc
index dae2d956ca..b0eeee3174 100644
--- a/tensorflow/compiler/tf2xla/xla_op_registry.cc
+++ b/tensorflow/compiler/tf2xla/xla_op_registry.cc
@@ -371,26 +371,28 @@ XlaOpRegistry& XlaOpRegistry::Instance() {
return *r;
}
-XlaOpRegistrationBuilder::XlaOpRegistrationBuilder(StringPiece name) {
+XlaOpRegistrationBuilder::XlaOpRegistrationBuilder(absl::string_view name) {
registration_.reset(new XlaOpRegistry::OpRegistration);
registration_->name = string(name);
}
-XlaOpRegistrationBuilder XlaOpRegistrationBuilder::Name(StringPiece name) {
+XlaOpRegistrationBuilder XlaOpRegistrationBuilder::Name(
+ absl::string_view name) {
XlaOpRegistrationBuilder registration(name);
return registration;
}
XlaOpRegistrationBuilder& XlaOpRegistrationBuilder::Device(
- absl::Span<const StringPiece> devices) {
+ absl::Span<const absl::string_view> devices) {
registration_->has_device_whitelist = true;
- for (StringPiece device : devices) {
+ for (absl::string_view device : devices) {
registration_->device_whitelist.emplace(device);
}
return *this;
}
-XlaOpRegistrationBuilder& XlaOpRegistrationBuilder::Device(StringPiece device) {
+XlaOpRegistrationBuilder& XlaOpRegistrationBuilder::Device(
+ absl::string_view device) {
registration_->has_device_whitelist = true;
registration_->device_whitelist.emplace(device);
return *this;
@@ -407,7 +409,7 @@ XlaOpRegistrationBuilder& XlaOpRegistrationBuilder::AllowResourceTypes() {
}
XlaOpRegistrationBuilder& XlaOpRegistrationBuilder::TypeConstraint(
- StringPiece attr_name, DataType allowed) {
+ absl::string_view attr_name, DataType allowed) {
std::set<DataType>& types =
registration_->type_constraints[string(attr_name)];
types.insert(allowed);
@@ -415,7 +417,7 @@ XlaOpRegistrationBuilder& XlaOpRegistrationBuilder::TypeConstraint(
}
XlaOpRegistrationBuilder& XlaOpRegistrationBuilder::TypeConstraint(
- StringPiece attr_name, absl::Span<const DataType> allowed) {
+ absl::string_view attr_name, absl::Span<const DataType> allowed) {
std::set<DataType>& types =
registration_->type_constraints[string(attr_name)];
for (DataType t : allowed) {
@@ -425,7 +427,7 @@ XlaOpRegistrationBuilder& XlaOpRegistrationBuilder::TypeConstraint(
}
XlaOpRegistrationBuilder& XlaOpRegistrationBuilder::CompileTimeConstInput(
- StringPiece input_name) {
+ absl::string_view input_name) {
registration_->compile_time_constant_inputs.emplace(input_name);
return *this;
}
@@ -452,7 +454,7 @@ XlaOpRegistrar::XlaOpRegistrar(
}
XlaBackendRegistrar::XlaBackendRegistrar(
- StringPiece name, absl::Span<const DataType> types,
+ absl::string_view name, absl::Span<const DataType> types,
XlaOpRegistry::BackendOpFilter op_filter) {
XlaOpRegistry& registry = XlaOpRegistry::Instance();
registry.RegisterBackend(string(name), types, op_filter);
diff --git a/tensorflow/compiler/tf2xla/xla_op_registry.h b/tensorflow/compiler/tf2xla/xla_op_registry.h
index c640842dc0..74a4885f1f 100644
--- a/tensorflow/compiler/tf2xla/xla_op_registry.h
+++ b/tensorflow/compiler/tf2xla/xla_op_registry.h
@@ -232,18 +232,18 @@ class XlaOpRegistry {
class XlaOpRegistrationBuilder {
public:
// Starts an operator registration chain.
- static XlaOpRegistrationBuilder Name(StringPiece name);
+ static XlaOpRegistrationBuilder Name(absl::string_view name);
// Specifies a whitelist of devices on which the operator may run.
- XlaOpRegistrationBuilder& Device(StringPiece devices);
- XlaOpRegistrationBuilder& Device(absl::Span<const StringPiece> devices);
+ XlaOpRegistrationBuilder& Device(absl::string_view devices);
+ XlaOpRegistrationBuilder& Device(absl::Span<const absl::string_view> devices);
// Specifies a type constraint for a type variable attribute. Each constraint
// specifies the set of types that the type variable may assume.
- XlaOpRegistrationBuilder& TypeConstraint(StringPiece attr_name,
+ XlaOpRegistrationBuilder& TypeConstraint(absl::string_view attr_name,
DataType allowed);
- XlaOpRegistrationBuilder& TypeConstraint(StringPiece attr_name,
+ XlaOpRegistrationBuilder& TypeConstraint(absl::string_view attr_name,
absl::Span<const DataType> allowed);
// Specifies that a dummy copy of this operator should not be registered on
@@ -254,13 +254,13 @@ class XlaOpRegistrationBuilder {
XlaOpRegistrationBuilder& AllowResourceTypes();
// Mark 'input_name' as an argument whose value must be known at compile-time.
- XlaOpRegistrationBuilder& CompileTimeConstInput(StringPiece input_name);
+ XlaOpRegistrationBuilder& CompileTimeConstInput(absl::string_view input_name);
std::unique_ptr<XlaOpRegistry::OpRegistration> Build(
XlaOpRegistry::Factory factory);
private:
- XlaOpRegistrationBuilder(StringPiece name);
+ XlaOpRegistrationBuilder(absl::string_view name);
std::unique_ptr<XlaOpRegistry::OpRegistration> registration_;
};
@@ -288,7 +288,7 @@ class XlaOpRegistrar {
class XlaBackendRegistrar {
public:
- XlaBackendRegistrar(StringPiece name, absl::Span<const DataType> types,
+ XlaBackendRegistrar(absl::string_view name, absl::Span<const DataType> types,
XlaOpRegistry::BackendOpFilter op_filter = nullptr);
};
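A registration sketch showing that the builder chain reads the same with absl::string_view parameters; the op name, type constraint, constant input, and kernel class are all hypothetical, and REGISTER_XLA_OP is the existing macro from this header:

```cpp
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"

// Sketch: hypothetical kernel plus registration using the string_view-based
// builder methods (Name, TypeConstraint, CompileTimeConstInput).
class MyExampleOp : public tensorflow::XlaOpKernel {
 public:
  explicit MyExampleOp(tensorflow::OpKernelConstruction* ctx)
      : XlaOpKernel(ctx) {}
  void Compile(tensorflow::XlaOpKernelContext* ctx) override {
    ctx->SetOutput(0, ctx->Input(0));
  }
};

REGISTER_XLA_OP(Name("MyExampleOp")
                    .TypeConstraint("T", tensorflow::DT_FLOAT)
                    .CompileTimeConstInput("size"),
                MyExampleOp);
```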
diff --git a/tensorflow/compiler/tf2xla/xla_resource.cc b/tensorflow/compiler/tf2xla/xla_resource.cc
index 7928fa0347..56c2e01055 100644
--- a/tensorflow/compiler/tf2xla/xla_resource.cc
+++ b/tensorflow/compiler/tf2xla/xla_resource.cc
@@ -43,7 +43,7 @@ XlaResource::XlaResource(Kind kind, int arg_num, string name, DataType type,
for (const string& gradient : tensor_array_gradients) {
tensor_array_gradients_[gradient].reset(new XlaResource(
/*kind=*/kTensorArray, /*arg_num=*/-1,
- /*name=*/strings::StrCat("TensorArrayGrad: ", name_), type_, shape_,
+ /*name=*/absl::StrCat("TensorArrayGrad: ", name_), type_, shape_,
xla::XlaOp(), tensor_array_size_, /*tensor_array_gradients=*/{}));
}
}
@@ -135,7 +135,7 @@ Status XlaResource::GetOrCreateTensorArrayGradient(const string& source,
xla::Broadcast(XlaHelpers::Zero(builder, type_), ta_shape.dim_sizes());
gradient.reset(
new XlaResource(/*kind=*/kTensorArray, /*arg_num=*/-1,
- /*name=*/strings::StrCat("TensorArrayGrad: ", name_),
+ /*name=*/absl::StrCat("TensorArrayGrad: ", name_),
type_, shape_, gradient_value, tensor_array_size_,
/*tensor_array_gradients=*/{}));
}
diff --git a/tensorflow/compiler/xla/client/xla_builder.cc b/tensorflow/compiler/xla/client/xla_builder.cc
index 7f2125f74c..887b970661 100644
--- a/tensorflow/compiler/xla/client/xla_builder.cc
+++ b/tensorflow/compiler/xla/client/xla_builder.cc
@@ -820,7 +820,7 @@ XlaOp XlaBuilder::Lt(const XlaOp& lhs, const XlaOp& rhs,
}
XlaOp XlaBuilder::Dot(const XlaOp& lhs, const XlaOp& rhs,
- const PrecisionConfigProto* precision_config_proto) {
+ const PrecisionConfig* precision_config) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape& lhs_shape, GetShape(lhs));
@@ -828,14 +828,13 @@ XlaOp XlaBuilder::Dot(const XlaOp& lhs, const XlaOp& rhs,
dimension_numbers.add_lhs_contracting_dimensions(
lhs_shape.dimensions_size() == 1 ? 0 : 1);
dimension_numbers.add_rhs_contracting_dimensions(0);
- return DotGeneral(lhs, rhs, dimension_numbers, precision_config_proto);
+ return DotGeneral(lhs, rhs, dimension_numbers, precision_config);
});
}
-XlaOp XlaBuilder::DotGeneral(
- const XlaOp& lhs, const XlaOp& rhs,
- const DotDimensionNumbers& dimension_numbers,
- const PrecisionConfigProto* precision_config_proto) {
+XlaOp XlaBuilder::DotGeneral(const XlaOp& lhs, const XlaOp& rhs,
+ const DotDimensionNumbers& dimension_numbers,
+ const PrecisionConfig* precision_config) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape& lhs_shape, GetShape(lhs));
@@ -844,8 +843,8 @@ XlaOp XlaBuilder::DotGeneral(
ShapeInference::InferDotOpShape(lhs_shape, rhs_shape,
dimension_numbers));
*instr.mutable_dot_dimension_numbers() = dimension_numbers;
- if (precision_config_proto != nullptr) {
- *instr.mutable_precision_config() = *precision_config_proto;
+ if (precision_config != nullptr) {
+ *instr.mutable_precision_config() = *precision_config;
}
return AddInstruction(std::move(instr), HloOpcode::kDot, {lhs, rhs});
});
@@ -899,28 +898,26 @@ Status XlaBuilder::VerifyConvolution(
XlaOp XlaBuilder::Conv(const XlaOp& lhs, const XlaOp& rhs,
absl::Span<const int64> window_strides, Padding padding,
int64 feature_group_count,
- const PrecisionConfigProto* precision_config_proto) {
+ const PrecisionConfig* precision_config) {
return ConvWithGeneralDimensions(
lhs, rhs, window_strides, padding,
CreateDefaultConvDimensionNumbers(window_strides.size()),
- feature_group_count, precision_config_proto);
+ feature_group_count, precision_config);
}
XlaOp XlaBuilder::ConvWithGeneralPadding(
const XlaOp& lhs, const XlaOp& rhs, absl::Span<const int64> window_strides,
absl::Span<const std::pair<int64, int64>> padding,
- int64 feature_group_count,
- const PrecisionConfigProto* precision_config_proto) {
+ int64 feature_group_count, const PrecisionConfig* precision_config) {
return ConvGeneral(lhs, rhs, window_strides, padding,
CreateDefaultConvDimensionNumbers(window_strides.size()),
- feature_group_count, precision_config_proto);
+ feature_group_count, precision_config);
}
XlaOp XlaBuilder::ConvWithGeneralDimensions(
const XlaOp& lhs, const XlaOp& rhs, absl::Span<const int64> window_strides,
Padding padding, const ConvolutionDimensionNumbers& dimension_numbers,
- int64 feature_group_count,
- const PrecisionConfigProto* precision_config_proto) {
+ int64 feature_group_count, const PrecisionConfig* precision_config) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape& lhs_shape, GetShape(lhs));
TF_ASSIGN_OR_RETURN(const Shape& rhs_shape, GetShape(rhs));
@@ -948,7 +945,7 @@ XlaOp XlaBuilder::ConvWithGeneralDimensions(
MakePadding(base_area_dimensions, window_dimensions,
window_strides, padding),
dimension_numbers, feature_group_count,
- precision_config_proto);
+ precision_config);
});
}
@@ -956,11 +953,10 @@ XlaOp XlaBuilder::ConvGeneral(
const XlaOp& lhs, const XlaOp& rhs, absl::Span<const int64> window_strides,
absl::Span<const std::pair<int64, int64>> padding,
const ConvolutionDimensionNumbers& dimension_numbers,
- int64 feature_group_count,
- const PrecisionConfigProto* precision_config_proto) {
+ int64 feature_group_count, const PrecisionConfig* precision_config) {
return ConvGeneralDilated(lhs, rhs, window_strides, padding, {}, {},
dimension_numbers, feature_group_count,
- precision_config_proto);
+ precision_config);
}
XlaOp XlaBuilder::ConvGeneralDilated(
@@ -968,8 +964,7 @@ XlaOp XlaBuilder::ConvGeneralDilated(
absl::Span<const std::pair<int64, int64>> padding,
absl::Span<const int64> lhs_dilation, absl::Span<const int64> rhs_dilation,
const ConvolutionDimensionNumbers& dimension_numbers,
- int64 feature_group_count,
- const PrecisionConfigProto* precision_config_proto) {
+ int64 feature_group_count, const PrecisionConfig* precision_config) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape& lhs_shape, GetShape(lhs));
@@ -996,8 +991,8 @@ XlaOp XlaBuilder::ConvGeneralDilated(
*instr.mutable_convolution_dimension_numbers() = dimension_numbers;
instr.set_feature_group_count(feature_group_count);
- if (precision_config_proto != nullptr) {
- *instr.mutable_precision_config() = *precision_config_proto;
+ if (precision_config != nullptr) {
+ *instr.mutable_precision_config() = *precision_config;
}
return AddInstruction(std::move(instr), HloOpcode::kConvolution,
@@ -2594,43 +2589,40 @@ XlaOp Le(const XlaOp& lhs, const XlaOp& rhs,
}
XlaOp Dot(const XlaOp& lhs, const XlaOp& rhs,
- const PrecisionConfigProto* precision_config_proto) {
- return lhs.builder()->Dot(lhs, rhs, precision_config_proto);
+ const PrecisionConfig* precision_config) {
+ return lhs.builder()->Dot(lhs, rhs, precision_config);
}
XlaOp DotGeneral(const XlaOp& lhs, const XlaOp& rhs,
const DotDimensionNumbers& dimension_numbers,
- const PrecisionConfigProto* precision_config_proto) {
+ const PrecisionConfig* precision_config) {
return lhs.builder()->DotGeneral(lhs, rhs, dimension_numbers,
- precision_config_proto);
+ precision_config);
}
XlaOp Conv(const XlaOp& lhs, const XlaOp& rhs,
absl::Span<const int64> window_strides, Padding padding,
- int64 feature_group_count,
- const PrecisionConfigProto* precision_config_proto) {
+ int64 feature_group_count, const PrecisionConfig* precision_config) {
return lhs.builder()->Conv(lhs, rhs, window_strides, padding,
- feature_group_count, precision_config_proto);
+ feature_group_count, precision_config);
}
-XlaOp ConvWithGeneralPadding(
- const XlaOp& lhs, const XlaOp& rhs, absl::Span<const int64> window_strides,
- absl::Span<const std::pair<int64, int64>> padding,
- int64 feature_group_count,
- const PrecisionConfigProto* precision_config_proto) {
- return lhs.builder()->ConvWithGeneralPadding(lhs, rhs, window_strides,
- padding, feature_group_count,
- precision_config_proto);
+XlaOp ConvWithGeneralPadding(const XlaOp& lhs, const XlaOp& rhs,
+ absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding,
+ int64 feature_group_count,
+ const PrecisionConfig* precision_config) {
+ return lhs.builder()->ConvWithGeneralPadding(
+ lhs, rhs, window_strides, padding, feature_group_count, precision_config);
}
XlaOp ConvWithGeneralDimensions(
const XlaOp& lhs, const XlaOp& rhs, absl::Span<const int64> window_strides,
Padding padding, const ConvolutionDimensionNumbers& dimension_numbers,
- int64 feature_group_count,
- const PrecisionConfigProto* precision_config_proto) {
+ int64 feature_group_count, const PrecisionConfig* precision_config) {
return lhs.builder()->ConvWithGeneralDimensions(
lhs, rhs, window_strides, padding, dimension_numbers, feature_group_count,
- precision_config_proto);
+ precision_config);
}
XlaOp ConvGeneral(const XlaOp& lhs, const XlaOp& rhs,
@@ -2638,10 +2630,10 @@ XlaOp ConvGeneral(const XlaOp& lhs, const XlaOp& rhs,
absl::Span<const std::pair<int64, int64>> padding,
const ConvolutionDimensionNumbers& dimension_numbers,
int64 feature_group_count,
- const PrecisionConfigProto* precision_config_proto) {
+ const PrecisionConfig* precision_config) {
return lhs.builder()->ConvGeneral(lhs, rhs, window_strides, padding,
dimension_numbers, feature_group_count,
- precision_config_proto);
+ precision_config);
}
XlaOp ConvGeneralDilated(const XlaOp& lhs, const XlaOp& rhs,
@@ -2651,10 +2643,10 @@ XlaOp ConvGeneralDilated(const XlaOp& lhs, const XlaOp& rhs,
absl::Span<const int64> rhs_dilation,
const ConvolutionDimensionNumbers& dimension_numbers,
int64 feature_group_count,
- const PrecisionConfigProto* precision_config_proto) {
+ const PrecisionConfig* precision_config) {
return lhs.builder()->ConvGeneralDilated(
lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,
- dimension_numbers, feature_group_count, precision_config_proto);
+ dimension_numbers, feature_group_count, precision_config);
}
XlaOp Fft(const XlaOp& operand, FftType fft_type,
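The free functions in xla_builder.cc now accept the renamed proto; passing nullptr keeps the previous default behaviour. A caller sketch (helper name hypothetical):

```cpp
#include "tensorflow/compiler/xla/client/xla_builder.h"

// Sketch: request HIGHEST precision for both operands of a dot, using the
// renamed xla::PrecisionConfig.
xla::XlaOp DotAtHighestPrecision(xla::XlaOp lhs, xla::XlaOp rhs) {
  xla::PrecisionConfig config;
  config.add_operand_precision(xla::PrecisionConfig::HIGHEST);
  config.add_operand_precision(xla::PrecisionConfig::HIGHEST);
  return xla::Dot(lhs, rhs, &config);
}
```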
diff --git a/tensorflow/compiler/xla/client/xla_builder.h b/tensorflow/compiler/xla/client/xla_builder.h
index 59fbc664f2..58e8f4e7fa 100644
--- a/tensorflow/compiler/xla/client/xla_builder.h
+++ b/tensorflow/compiler/xla/client/xla_builder.h
@@ -496,20 +496,19 @@ class XlaBuilder {
// Enqueues a dot instruction onto the computation.
XlaOp Dot(const XlaOp& lhs, const XlaOp& rhs,
- const PrecisionConfigProto* precision_config_proto = nullptr);
+ const PrecisionConfig* precision_config = nullptr);
// Enqueues a general dot instruction onto the computation.
- XlaOp DotGeneral(
- const XlaOp& lhs, const XlaOp& rhs,
- const DotDimensionNumbers& dimension_numbers,
- const PrecisionConfigProto* precision_config_proto = nullptr);
+ XlaOp DotGeneral(const XlaOp& lhs, const XlaOp& rhs,
+ const DotDimensionNumbers& dimension_numbers,
+ const PrecisionConfig* precision_config = nullptr);
// Enqueues a convolution instruction onto the computation, which uses the
// default convolution dimension numbers.
XlaOp Conv(const XlaOp& lhs, const XlaOp& rhs,
absl::Span<const int64> window_strides, Padding padding,
int64 feature_group_count = 1,
- const PrecisionConfigProto* precision_config_proto = nullptr);
+ const PrecisionConfig* precision_config = nullptr);
// Enqueues a convolution instruction onto the computation, with the caller
// provided padding configuration in the format returned by MakePadding().
@@ -518,7 +517,7 @@ class XlaBuilder {
absl::Span<const int64> window_strides,
absl::Span<const std::pair<int64, int64>> padding,
int64 feature_group_count = 1,
- const PrecisionConfigProto* precision_config_proto = nullptr);
+ const PrecisionConfig* precision_config = nullptr);
// Enqueues a convolution instruction onto the computation, with the caller
// provided dimension numbers configuration.
@@ -527,29 +526,27 @@ class XlaBuilder {
absl::Span<const int64> window_strides, Padding padding,
const ConvolutionDimensionNumbers& dimension_numbers,
int64 feature_group_count = 1,
- const PrecisionConfigProto* precision_config_proto = nullptr);
+ const PrecisionConfig* precision_config = nullptr);
// Enqueues a convolution instruction onto the computation, with the caller
// provided padding configuration as well as the dimension numbers.
- XlaOp ConvGeneral(
- const XlaOp& lhs, const XlaOp& rhs,
- absl::Span<const int64> window_strides,
- absl::Span<const std::pair<int64, int64>> padding,
- const ConvolutionDimensionNumbers& dimension_numbers,
- int64 feature_group_count = 1,
- const PrecisionConfigProto* precision_config_proto = nullptr);
+ XlaOp ConvGeneral(const XlaOp& lhs, const XlaOp& rhs,
+ absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding,
+ const ConvolutionDimensionNumbers& dimension_numbers,
+ int64 feature_group_count = 1,
+ const PrecisionConfig* precision_config = nullptr);
// Enqueues a convolution instruction onto the computation, with the caller
// provided padding configuration, dilation factors and dimension numbers.
- XlaOp ConvGeneralDilated(
- const XlaOp& lhs, const XlaOp& rhs,
- absl::Span<const int64> window_strides,
- absl::Span<const std::pair<int64, int64>> padding,
- absl::Span<const int64> lhs_dilation,
- absl::Span<const int64> rhs_dilation,
- const ConvolutionDimensionNumbers& dimension_numbers,
- int64 feature_group_count = 1,
- const PrecisionConfigProto* precision_config_proto = nullptr);
+ XlaOp ConvGeneralDilated(const XlaOp& lhs, const XlaOp& rhs,
+ absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding,
+ absl::Span<const int64> lhs_dilation,
+ absl::Span<const int64> rhs_dilation,
+ const ConvolutionDimensionNumbers& dimension_numbers,
+ int64 feature_group_count = 1,
+ const PrecisionConfig* precision_config = nullptr);
// Enqueues an FFT instruction onto the computation, of the given type and
// with the given FFT length.
@@ -1150,32 +1147,30 @@ class XlaBuilder {
friend XlaOp Le(const XlaOp& lhs, const XlaOp& rhs,
absl::Span<const int64> broadcast_dimensions);
friend XlaOp Dot(const XlaOp& lhs, const XlaOp& rhs,
- const PrecisionConfigProto* precision_config_proto);
+ const PrecisionConfig* precision_config);
friend XlaOp DotGeneral(const XlaOp& lhs, const XlaOp& rhs,
const DotDimensionNumbers& dimension_number,
- const PrecisionConfigProto* precision_config_proto);
+ const PrecisionConfig* precision_config);
friend XlaOp Conv(const XlaOp& lhs, const XlaOp& rhs,
absl::Span<const int64> window_strides, Padding padding,
int64 feature_group_count,
- const PrecisionConfigProto* precision_config_proto);
+ const PrecisionConfig* precision_config);
friend XlaOp ConvWithGeneralPadding(
const XlaOp& lhs, const XlaOp& rhs,
absl::Span<const int64> window_strides,
absl::Span<const std::pair<int64, int64>> padding,
- int64 feature_group_count,
- const PrecisionConfigProto* precision_config_proto);
+ int64 feature_group_count, const PrecisionConfig* precision_config);
friend XlaOp ConvWithGeneralDimensions(
const XlaOp& lhs, const XlaOp& rhs,
absl::Span<const int64> window_strides, Padding padding,
const ConvolutionDimensionNumbers& dimension_numbers,
- int64 feature_group_count,
- const PrecisionConfigProto* precision_config_proto);
+ int64 feature_group_count, const PrecisionConfig* precision_config);
friend XlaOp ConvGeneral(const XlaOp& lhs, const XlaOp& rhs,
absl::Span<const int64> window_strides,
absl::Span<const std::pair<int64, int64>> padding,
const ConvolutionDimensionNumbers& dimension_numbers,
int64 feature_group_count,
- const PrecisionConfigProto* precision_config_proto);
+ const PrecisionConfig* precision_config);
friend XlaOp ConvGeneralDilated(
const XlaOp& lhs, const XlaOp& rhs,
absl::Span<const int64> window_strides,
@@ -1183,8 +1178,7 @@ class XlaBuilder {
absl::Span<const int64> lhs_dilation,
absl::Span<const int64> rhs_dilation,
const ConvolutionDimensionNumbers& dimension_numbers,
- int64 feature_group_count,
- const PrecisionConfigProto* precision_config_proto);
+ int64 feature_group_count, const PrecisionConfig* precision_config);
friend XlaOp Fft(const XlaOp& operand, FftType fft_type,
absl::Span<const int64> fft_length);
friend XlaOp Infeed(XlaBuilder* builder, const Shape& shape,
@@ -1629,27 +1623,27 @@ XlaOp Le(const XlaOp& lhs, const XlaOp& rhs,
// Enqueues a dot instruction onto the computation.
XlaOp Dot(const XlaOp& lhs, const XlaOp& rhs,
- const PrecisionConfigProto* precision_config_proto = nullptr);
+ const PrecisionConfig* precision_config = nullptr);
// Enqueues a general dot instruction onto the computation.
XlaOp DotGeneral(const XlaOp& lhs, const XlaOp& rhs,
const DotDimensionNumbers& dimension_numbers,
- const PrecisionConfigProto* precision_config_proto = nullptr);
+ const PrecisionConfig* precision_config = nullptr);
// Enqueues a convolution instruction onto the computation, which uses the
// default convolution dimension numbers.
XlaOp Conv(const XlaOp& lhs, const XlaOp& rhs,
absl::Span<const int64> window_strides, Padding padding,
int64 feature_group_count = 1,
- const PrecisionConfigProto* precision_config_proto = nullptr);
+ const PrecisionConfig* precision_config = nullptr);
// Enqueues a convolution instruction onto the computation, with the caller
// provided padding configuration in the format returned by MakePadding().
-XlaOp ConvWithGeneralPadding(
- const XlaOp& lhs, const XlaOp& rhs, absl::Span<const int64> window_strides,
- absl::Span<const std::pair<int64, int64>> padding,
- int64 feature_group_count = 1,
- const PrecisionConfigProto* precision_config_proto = nullptr);
+XlaOp ConvWithGeneralPadding(const XlaOp& lhs, const XlaOp& rhs,
+ absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding,
+ int64 feature_group_count = 1,
+ const PrecisionConfig* precision_config = nullptr);
// Enqueues a convolution instruction onto the computation, with the caller
// provided dimension numbers configuration.
@@ -1657,7 +1651,7 @@ XlaOp ConvWithGeneralDimensions(
const XlaOp& lhs, const XlaOp& rhs, absl::Span<const int64> window_strides,
Padding padding, const ConvolutionDimensionNumbers& dimension_numbers,
int64 feature_group_count = 1,
- const PrecisionConfigProto* precision_config_proto = nullptr);
+ const PrecisionConfig* precision_config = nullptr);
// Enqueues a convolution instruction onto the computation, with the caller
// provided padding configuration as well as the dimension numbers.
@@ -1666,17 +1660,18 @@ XlaOp ConvGeneral(const XlaOp& lhs, const XlaOp& rhs,
absl::Span<const std::pair<int64, int64>> padding,
const ConvolutionDimensionNumbers& dimension_numbers,
int64 feature_group_count = 1,
- const PrecisionConfigProto* precision_config_proto = nullptr);
+ const PrecisionConfig* precision_config = nullptr);
// Enqueues a convolution instruction onto the computation, with the caller
// provided padding configuration, dilation factors and dimension numbers.
-XlaOp ConvGeneralDilated(
- const XlaOp& lhs, const XlaOp& rhs, absl::Span<const int64> window_strides,
- absl::Span<const std::pair<int64, int64>> padding,
- absl::Span<const int64> lhs_dilation, absl::Span<const int64> rhs_dilation,
- const ConvolutionDimensionNumbers& dimension_numbers,
- int64 feature_group_count = 1,
- const PrecisionConfigProto* precision_config_proto = nullptr);
+XlaOp ConvGeneralDilated(const XlaOp& lhs, const XlaOp& rhs,
+ absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding,
+ absl::Span<const int64> lhs_dilation,
+ absl::Span<const int64> rhs_dilation,
+ const ConvolutionDimensionNumbers& dimension_numbers,
+ int64 feature_group_count = 1,
+ const PrecisionConfig* precision_config = nullptr);
// Enqueues an FFT instruction onto the computation, of the given type and
// with the given FFT length.
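The builder methods and free functions above now take an optional const PrecisionConfig* that defaults to nullptr. A minimal sketch of passing one explicitly through the free-function Dot overload; the shapes, parameter names, and include paths are illustrative, not taken from this patch:

#include "tensorflow/compiler/xla/client/xla_builder.h"
#include "tensorflow/compiler/xla/shape_util.h"

// Builds two parameters and a dot whose operands are computed at HIGHEST
// precision; passing nullptr (the default) keeps the backend's defaults.
xla::XlaBuilder builder("dot_with_precision");
xla::XlaOp lhs = xla::Parameter(
    &builder, 0, xla::ShapeUtil::MakeShape(xla::F32, {2, 3}), "lhs");
xla::XlaOp rhs = xla::Parameter(
    &builder, 1, xla::ShapeUtil::MakeShape(xla::F32, {3, 4}), "rhs");
xla::PrecisionConfig precision_config;
precision_config.add_operand_precision(xla::PrecisionConfig::HIGHEST);
precision_config.add_operand_precision(xla::PrecisionConfig::HIGHEST);
xla::XlaOp dot = xla::Dot(lhs, rhs, &precision_config);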
diff --git a/tensorflow/compiler/xla/reference_util.cc b/tensorflow/compiler/xla/reference_util.cc
index 8a05d1b0d7..9f1afa2671 100644
--- a/tensorflow/compiler/xla/reference_util.cc
+++ b/tensorflow/compiler/xla/reference_util.cc
@@ -574,9 +574,9 @@ ReferenceUtil::ConvArray4DGeneralDimensionsDilated(
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
- PrecisionConfigProto precision_config;
+ PrecisionConfig precision_config;
precision_config.mutable_operand_precision()->Resize(
- /*new_size=*/2, PrecisionConfigProto::DEFAULT);
+ /*new_size=*/2, PrecisionConfig::DEFAULT);
b.AddInstruction(HloInstruction::CreateConvolve(
shape, lhs_instruction, rhs_instruction, /*feature_group_count=*/1,
window, dnums, precision_config));
diff --git a/tensorflow/compiler/xla/service/BUILD b/tensorflow/compiler/xla/service/BUILD
index f6cfac6537..ab86dce510 100644
--- a/tensorflow/compiler/xla/service/BUILD
+++ b/tensorflow/compiler/xla/service/BUILD
@@ -989,6 +989,7 @@ tf_cc_test(
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/compiler/xla/tests:hlo_test_base",
"//tensorflow/compiler/xla/tests:xla_internal_test_main",
+ "//tensorflow/core:test",
"@com_google_absl//absl/memory",
],
)
@@ -1036,6 +1037,7 @@ tf_cc_test(
":flatten_call_graph",
":hlo",
":hlo_ordering",
+ ":hlo_schedule",
":hlo_scheduling",
"//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
@@ -1049,6 +1051,7 @@ tf_cc_test(
"//tensorflow/compiler/xla/tests:hlo_verified_test_base",
"//tensorflow/compiler/xla/tests:xla_internal_test_main",
"//tensorflow/core:lib",
+ "//tensorflow/core:test",
"@com_google_absl//absl/memory",
],
)
@@ -1062,6 +1065,7 @@ cc_library(
":hlo",
":hlo_dataflow_analysis",
":hlo_proto",
+ ":hlo_schedule",
":hlo_value",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
@@ -1082,6 +1086,7 @@ tf_cc_test(
":hlo",
":hlo_dataflow_analysis",
":hlo_ordering",
+ ":hlo_schedule",
":hlo_scheduling",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:types",
@@ -1089,6 +1094,7 @@ tf_cc_test(
"//tensorflow/compiler/xla/service:hlo_parser",
"//tensorflow/compiler/xla/tests:hlo_test_base",
"//tensorflow/compiler/xla/tests:xla_internal_test_main",
+ "//tensorflow/core:test",
],
)
@@ -1102,6 +1108,7 @@ cc_library(
":hlo",
":hlo_ordering",
":hlo_proto",
+ ":hlo_schedule",
":tuple_points_to_analysis",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:util",
@@ -1125,6 +1132,7 @@ tf_cc_test(
"//tensorflow/compiler/xla/tests:hlo_test_base",
"//tensorflow/compiler/xla/tests:xla_internal_test_main",
"//tensorflow/core:lib",
+ "//tensorflow/core:test",
"@com_google_absl//absl/memory",
],
)
@@ -1170,6 +1178,43 @@ cc_library(
)
cc_library(
+ name = "hlo_schedule",
+ srcs = ["hlo_schedule.cc"],
+ hdrs = ["hlo_schedule.h"],
+ deps = [
+ ":hlo",
+ "//tensorflow/compiler/xla:status",
+ "//tensorflow/compiler/xla:status_macros",
+ "//tensorflow/compiler/xla:util",
+ "//tensorflow/core:lib_internal",
+ "@com_google_absl//absl/strings",
+ "@com_google_absl//absl/strings:str_format",
+ "@com_google_absl//absl/types:span",
+ ],
+)
+
+tf_cc_test(
+ name = "hlo_schedule_test",
+ srcs = ["hlo_schedule_test.cc"],
+ deps = [
+ ":heap_simulator",
+ ":hlo",
+ ":hlo_dce",
+ ":hlo_ordering",
+ ":hlo_parser",
+ ":hlo_schedule",
+ ":hlo_scheduling",
+ "//tensorflow/compiler/xla:shape_util",
+ "//tensorflow/compiler/xla:types",
+ "//tensorflow/compiler/xla:xla_data_proto",
+ "//tensorflow/compiler/xla/tests:hlo_test_base",
+ "//tensorflow/compiler/xla/tests:xla_internal_test_main",
+ "//tensorflow/core:test",
+ "@com_google_absl//absl/algorithm:container",
+ ],
+)
+
+cc_library(
name = "hlo_scheduling",
srcs = ["hlo_scheduling.cc"],
hdrs = ["hlo_scheduling.h"],
@@ -1177,6 +1222,7 @@ cc_library(
":heap_simulator",
":hlo",
":hlo_ordering",
+ ":hlo_schedule",
":logical_buffer",
":tuple_points_to_analysis",
"//tensorflow/compiler/xla:shape_util",
@@ -1205,6 +1251,7 @@ tf_cc_test(
"//tensorflow/compiler/xla/tests:hlo_test_base",
"//tensorflow/compiler/xla/tests:xla_internal_test_main",
"//tensorflow/core:test",
+ "@com_google_absl//absl/algorithm:container",
],
)
@@ -2366,6 +2413,7 @@ cc_library(
":hlo",
":hlo_dce",
":hlo_ordering",
+ ":hlo_schedule",
":hlo_scheduling",
":logical_buffer",
":tuple_points_to_analysis",
@@ -2520,6 +2568,7 @@ cc_library(
"//tensorflow/compiler/xla:types",
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/core:lib",
+ "@com_google_absl//absl/container:inlined_vector",
],
)
@@ -3187,6 +3236,7 @@ cc_library(
"//tensorflow/compiler/xla:util",
"//tensorflow/core:lib",
"@com_google_absl//absl/algorithm:container",
+ "@com_google_absl//absl/container:inlined_vector",
],
)
diff --git a/tensorflow/compiler/xla/service/algebraic_simplifier_test.cc b/tensorflow/compiler/xla/service/algebraic_simplifier_test.cc
index 019840b476..aa40fba9bb 100644
--- a/tensorflow/compiler/xla/service/algebraic_simplifier_test.cc
+++ b/tensorflow/compiler/xla/service/algebraic_simplifier_test.cc
@@ -1013,13 +1013,6 @@ TEST_F(AlgebraicSimplifierTest, PowNegative1) {
1);
}
-PrecisionConfigProto DefaultPrecisionConfig(int operands) {
- PrecisionConfigProto precision_config;
- precision_config.mutable_operand_precision()->Resize(
- operands, PrecisionConfigProto::DEFAULT);
- return precision_config;
-}
-
TEST_F(AlgebraicSimplifierTest, ZeroSizedConvolution) {
auto builder = HloComputation::Builder(TestName());
HloInstruction* lhs = builder.AddInstruction(HloInstruction::CreateParameter(
@@ -2386,9 +2379,9 @@ TEST_P(ConvFilterPaddingTest, DoIt) {
// Add a PrecisionConfig and check that AlgebraicSimplifier keeps it in place
// after the transformation.
- PrecisionConfigProto precision_config;
- precision_config.add_operand_precision(PrecisionConfigProto::HIGH);
- precision_config.add_operand_precision(PrecisionConfigProto::HIGHEST);
+ PrecisionConfig precision_config;
+ precision_config.add_operand_precision(PrecisionConfig::HIGH);
+ precision_config.add_operand_precision(PrecisionConfig::HIGHEST);
orig_conv->set_precision_config(precision_config);
auto module = CreateNewModule();
@@ -2408,9 +2401,8 @@ TEST_P(ConvFilterPaddingTest, DoIt) {
conv->operand(1)->shape().dimensions(2),
conv->operand(1)->shape().dimensions(3),
testcase.expected_conv_window));
- EXPECT_THAT(
- conv->precision_config().operand_precision(),
- ElementsAre(PrecisionConfigProto::HIGH, PrecisionConfigProto::HIGHEST));
+ EXPECT_THAT(conv->precision_config().operand_precision(),
+ ElementsAre(PrecisionConfig::HIGH, PrecisionConfig::HIGHEST));
}
}
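This file and several test files later in the patch drop an identical local DefaultPrecisionConfig helper. For reference, the pattern those helpers implemented, written against the renamed PrecisionConfig message, is sketched below; where the shared replacement now lives is not visible in these hunks, so treat the free function itself as illustrative:

PrecisionConfig DefaultPrecisionConfig(int operands) {
  PrecisionConfig precision_config;
  precision_config.mutable_operand_precision()->Resize(
      operands, PrecisionConfig::DEFAULT);
  return precision_config;
}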
diff --git a/tensorflow/compiler/xla/service/bfloat16_normalization_test.cc b/tensorflow/compiler/xla/service/bfloat16_normalization_test.cc
index d480d72297..933cf873e0 100644
--- a/tensorflow/compiler/xla/service/bfloat16_normalization_test.cc
+++ b/tensorflow/compiler/xla/service/bfloat16_normalization_test.cc
@@ -308,9 +308,9 @@ TEST_F(BFloat16NormalizationTest, DoNotAddUnsupportedMixedPrecision) {
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
- PrecisionConfigProto precision_config;
+ PrecisionConfig precision_config;
precision_config.mutable_operand_precision()->Resize(
- 2, PrecisionConfigProto::DEFAULT);
+ 2, PrecisionConfig::DEFAULT);
HloInstruction* dot = builder.AddInstruction(
HloInstruction::CreateDot(bf16_shape, a, b, dot_dnums, precision_config));
diff --git a/tensorflow/compiler/xla/service/buffer_assignment.cc b/tensorflow/compiler/xla/service/buffer_assignment.cc
index 8b8c6bfd26..0f0af57626 100644
--- a/tensorflow/compiler/xla/service/buffer_assignment.cc
+++ b/tensorflow/compiler/xla/service/buffer_assignment.cc
@@ -617,18 +617,24 @@ Status BufferAssignment::ComputeSummaryStats() {
}
// Only compute total fragmentation if all computations have schedules.
- SequentialHloOrdering::HloModuleSequence module_sequence;
+ HloSchedule schedule(module_);
+ bool schedule_complete = true;
for (const auto& computation : module_->computations()) {
- const std::vector<const HloInstruction*>* sequence =
- liveness_->hlo_ordering().SequentialOrder(*computation);
- if (sequence != nullptr) {
- module_sequence.emplace(computation, *sequence);
+ if (!computation->IsFusionComputation()) {
+ const std::vector<const HloInstruction*>* sequence =
+ liveness_->hlo_ordering().SequentialOrder(*computation);
+ if (sequence == nullptr) {
+ schedule_complete = false;
+ } else {
+ schedule.set_sequence(computation, *sequence);
+ }
}
}
- if (module_sequence.size() == module_->computation_count()) {
+ if (schedule_complete) {
+ TF_RETURN_IF_ERROR(schedule.Verify());
TF_ASSIGN_OR_RETURN(
const int64 min_size,
- HeapSimulator::MinimumMemoryForModule(module_sequence, buffer_size_));
+ HeapSimulator::MinimumMemoryForModule(schedule, buffer_size_));
stats_.total_fragmentation_bytes = stats_.total_allocation_bytes - min_size;
}
@@ -1064,7 +1070,7 @@ Status BufferAssigner::AssignBuffersWithSequentialOrdering(
// since buffers for kCall, kWhile, and kConditional sub-computations are
// only live for the duration of their calling instructions.
VLOG(1) << "Running whole-module heap simulation";
- SequentialHloOrdering::HloModuleSequence module_sequence;
+ HloSchedule schedule(&assignment->module());
FlatSet<const LogicalBuffer*> all_buffers_to_assign;
for (const auto& pair : buffers_to_assign_sequentially) {
const HloComputation* computation = pair.first;
@@ -1072,7 +1078,7 @@ Status BufferAssigner::AssignBuffersWithSequentialOrdering(
const std::vector<const HloInstruction*>* instruction_sequence =
hlo_ordering.SequentialOrder(*computation);
CHECK(instruction_sequence != nullptr) << computation->name();
- module_sequence[computation] = *instruction_sequence;
+ schedule.set_sequence(computation, *instruction_sequence);
all_buffers_to_assign.insert(buffers_to_assign.begin(),
buffers_to_assign.end());
}
@@ -1090,7 +1096,7 @@ Status BufferAssigner::AssignBuffersWithSequentialOrdering(
const HeapSimulator::Result result,
HeapSimulator::Run(absl::make_unique<DecreasingSizeRunsHeap>(
absl::make_unique<LazyBestFitHeap>(alignment)),
- assignment->module(), module_sequence,
+ assignment->module(), schedule,
assignment->points_to_analysis(),
assignment->buffer_size_, options));
AssignBuffersFromHeapSimulator(result, assignment,
@@ -1121,7 +1127,7 @@ Status BufferAssigner::AssignBuffersWithSequentialOrdering(
HeapSimulator::Run(
absl::make_unique<DecreasingSizeRunsHeap>(
absl::make_unique<LazyBestFitHeap>(alignment)),
- *computation, *instruction_sequence,
+ *computation, HloInstructionSequence(*instruction_sequence),
assignment->points_to_analysis(), assignment->buffer_size_,
options));
AssignBuffersFromHeapSimulator(result, assignment,
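Buffer assignment above now threads an HloSchedule (a per-module container of per-computation instruction sequences) instead of the raw computation-to-vector map. A minimal sketch of the construction flow, using illustrative instruction names and assuming a Status-returning context for TF_RETURN_IF_ERROR:

#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/service/hlo_ordering.h"
#include "tensorflow/compiler/xla/service/hlo_schedule.h"

// A schedule is bound to its module; sequences are set per computation, and
// Verify() checks that each sequence is a consistent total order.
HloSchedule schedule(module);
schedule.set_sequence(module->entry_computation(),
                      {param, negate, exp, add});  // illustrative instructions
TF_RETURN_IF_ERROR(schedule.Verify());
auto ordering = absl::make_unique<SequentialHloOrdering>(schedule);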
diff --git a/tensorflow/compiler/xla/service/buffer_assignment_test.cc b/tensorflow/compiler/xla/service/buffer_assignment_test.cc
index 7398f105a0..5a231c173d 100644
--- a/tensorflow/compiler/xla/service/buffer_assignment_test.cc
+++ b/tensorflow/compiler/xla/service/buffer_assignment_test.cc
@@ -33,6 +33,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
#include "tensorflow/compiler/xla/service/hlo_ordering.h"
#include "tensorflow/compiler/xla/service/hlo_parser.h"
+#include "tensorflow/compiler/xla/service/hlo_schedule.h"
#include "tensorflow/compiler/xla/service/hlo_scheduling.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/test.h"
@@ -40,6 +41,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/tests/hlo_verified_test_base.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/macros.h"
namespace xla {
@@ -120,14 +122,10 @@ class BufferAssignmentTest : public HloVerifiedTestBase {
HloModule* module,
absl::Span<const HloInstruction* const> instruction_sequence,
int64 alignment = 1) {
- SequentialHloOrdering::HloModuleSequence module_sequence;
- module_sequence[module->entry_computation()] =
- std::vector<const HloInstruction*>(instruction_sequence.begin(),
- instruction_sequence.end());
+ HloSchedule schedule(module);
+ schedule.set_sequence(module->entry_computation(), instruction_sequence);
return BufferAssigner::Run(
- module,
- absl::make_unique<SequentialHloOrdering>(module,
- module_sequence),
+ module, absl::make_unique<SequentialHloOrdering>(schedule),
backend().compiler()->BufferSizeBytesFunction(),
[alignment](LogicalBuffer::Color) { return alignment; },
/*allow_input_output_aliasing=*/false,
@@ -1490,9 +1488,9 @@ TEST_F(BufferAssignmentTest, OneTempAllocation) {
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
- PrecisionConfigProto precision_config;
+ PrecisionConfig precision_config;
precision_config.mutable_operand_precision()->Resize(
- 2, PrecisionConfigProto::DEFAULT);
+ 2, PrecisionConfig::DEFAULT);
auto dot_ab = builder.AddInstruction(HloInstruction::CreateDot(
shape_2x4, param_a, param_b, dot_dnums, precision_config));
auto dot_bc = builder.AddInstruction(HloInstruction::CreateDot(
@@ -1785,11 +1783,10 @@ class WhileBufferAssignmentTest : public HloVerifiedTestBase {
std::unique_ptr<BufferAssignment> RunBufferAssignment(HloModule* module,
int64 alignment = 1) {
- auto sequence =
- ScheduleComputationsInModule(*module, ByteSizeOf).ConsumeValueOrDie();
+ HloSchedule schedule =
+ ScheduleModule(*module, ByteSizeOf).ConsumeValueOrDie();
return BufferAssigner::Run(
- module,
- absl::make_unique<SequentialHloOrdering>(module, sequence),
+ module, absl::make_unique<SequentialHloOrdering>(schedule),
ByteSizeOf,
[alignment](LogicalBuffer::Color) { return alignment; },
/*allow_input_output_aliasing=*/false,
@@ -2096,17 +2093,25 @@ TEST_F(WhileBufferAssignmentTest, ColocatedBuffers) {
// Create a sequential order among all the instructions in the entry
// computation, since the issue this test stresses depends on the order the
// nodes are traversed during BufferAssignment.
- SequentialHloOrdering::HloModuleSequence sequence;
- sequence[module->entry_computation()] = {
- token, infeed, infeed_data, while0, while1, zero, add, while2, tuple};
+ TF_ASSERT_OK_AND_ASSIGN(
+ HloSchedule schedule,
+ ScheduleModule(*module, [](const BufferValue& buffer) {
+ return ShapeUtil::ByteSizeOf(buffer.shape(),
+ /*pointer_size=*/sizeof(void*));
+ }));
+ schedule.set_sequence(
+ module->entry_computation(),
+ {token, infeed, infeed_data, while0, while1, zero, add, while2, tuple});
+ TF_ASSERT_OK(schedule.Verify());
+
TF_ASSERT_OK_AND_ASSIGN(
auto assignment,
- BufferAssigner::Run(
- module, absl::make_unique<SequentialHloOrdering>(module, sequence),
- backend().compiler()->BufferSizeBytesFunction(),
- [](LogicalBuffer::Color) { return 1; },
- /*allow_input_output_aliasing=*/false,
- /*allocate_buffers_for_constants=*/true));
+ BufferAssigner::Run(module,
+ absl::make_unique<SequentialHloOrdering>(schedule),
+ backend().compiler()->BufferSizeBytesFunction(),
+ [](LogicalBuffer::Color) { return 1; },
+ /*allow_input_output_aliasing=*/false,
+ /*allocate_buffers_for_constants=*/true));
// The result tuple elements must be assigned with different buffers.
TF_ASSERT_OK_AND_ASSIGN(auto slice0, assignment->GetUniqueSlice(tuple, {0}));
@@ -2263,29 +2268,6 @@ ENTRY Main {
GetAllocation(*buffers, param0, {1, 1}));
}
-static bool IsPostOrderTraversal(
- const std::vector<const HloInstruction*>& sequence) {
- tensorflow::gtl::FlatSet<const HloInstruction*> seen_so_far;
- auto has_not_been_seen_yet = [&](const HloInstruction* instruction) {
- return seen_so_far.count(instruction) == 0;
- };
-
- for (auto instruction : sequence) {
- if (std::any_of(instruction->operands().begin(),
- instruction->operands().end(), has_not_been_seen_yet) ||
- std::any_of(instruction->control_predecessors().begin(),
- instruction->control_predecessors().end(),
- has_not_been_seen_yet)) {
- return false; // Not a post order.
- }
- if (!seen_so_far.insert(instruction).second) {
- return false; // Not a "traversal".
- }
- }
-
- return true;
-}
-
TEST_F(WhileBufferAssignmentTest, WhileLoopsInterferingResultRange) {
auto module = CreateNewModule();
auto builder = HloComputation::Builder(TestName());
@@ -2340,27 +2322,27 @@ TEST_F(WhileBufferAssignmentTest, WhileLoopsInterferingResultRange) {
RunCopyInsertion(module);
- auto sequence =
- ScheduleComputationsInModule(*module, ByteSizeOf).ConsumeValueOrDie();
+ HloSchedule schedule =
+ ScheduleModule(*module, ByteSizeOf).ConsumeValueOrDie();
- // To trigger b/38494731, we want a specific Hlo sequence for the
+ // To trigger b/38494731, we want a specific Hlo schedule for the
// root computation, so we overwrite that entry with a manually
// crafted sequence.
- sequence[module->entry_computation()] = {
- input1, weights1, one, output1, while1->operand(0), while1,
- input0, weights0, zero, output0, while0->operand(0), while0,
- gte0, gte1, root_add};
+ schedule.set_sequence(module->entry_computation(),
+ {input1, weights1, one, output1, while1->operand(0),
+ while1, input0, weights0, zero, output0,
+ while0->operand(0), while0, gte0, gte1, root_add});
- // If this ASSERT_TRUE fails, we constructed a bogus sequence above
- // and this test itself is buggy.
- ASSERT_TRUE(IsPostOrderTraversal(sequence[module->entry_computation()]));
+ // If this ASSERT fails, we constructed a bogus sequence above and this test
+ // itself is buggy.
+ TF_ASSERT_OK(schedule.Verify());
auto assignment =
- BufferAssigner::Run(
- module, absl::make_unique<SequentialHloOrdering>(module, sequence),
- ByteSizeOf, [](LogicalBuffer::Color) { return 1; },
- /*allow_input_output_aliasing=*/false,
- /*allocate_buffers_for_constants=*/true)
+ BufferAssigner::Run(module,
+ absl::make_unique<SequentialHloOrdering>(schedule),
+ ByteSizeOf, [](LogicalBuffer::Color) { return 1; },
+ /*allow_input_output_aliasing=*/false,
+ /*allocate_buffers_for_constants=*/true)
.ConsumeValueOrDie();
EXPECT_TRUE(BuffersDistinct({while0}, {while1}, *assignment));
diff --git a/tensorflow/compiler/xla/service/buffer_liveness_test.cc b/tensorflow/compiler/xla/service/buffer_liveness_test.cc
index 26e26e316d..414bfe7999 100644
--- a/tensorflow/compiler/xla/service/buffer_liveness_test.cc
+++ b/tensorflow/compiler/xla/service/buffer_liveness_test.cc
@@ -27,6 +27,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/tests/hlo_test_base.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/lib/core/status_test_util.h"
namespace xla {
namespace {
@@ -166,12 +167,12 @@ TEST_F(BufferLivenessTest, MultipleEntryParameters_Sequential) {
auto module = CreateNewModule();
HloComputation* entry = module->AddEntryComputation(builder.Build());
- SequentialHloOrdering::HloModuleSequence sequence;
- sequence.insert({entry, {param0, negate, param1, exp, add}});
- auto liveness = BufferLiveness::Run(module.get(),
- absl::make_unique<SequentialHloOrdering>(
- module.get(), sequence))
- .ConsumeValueOrDie();
+ HloSchedule schedule(module.get());
+ schedule.set_sequence(entry, {param0, negate, param1, exp, add});
+ auto liveness =
+ BufferLiveness::Run(module.get(),
+ absl::make_unique<SequentialHloOrdering>(schedule))
+ .ConsumeValueOrDie();
// Entry parameters interfere as if they are defined simultaneously at
// the very beginning.
@@ -291,13 +292,12 @@ TEST_F(BufferLivenessTest, OverlappedBuffersSequentialOrder) {
auto module = CreateNewModule();
auto computation = module->AddEntryComputation(builder.Build());
- SequentialHloOrdering::HloModuleSequence module_sequence;
- std::vector<const HloInstruction*> order = {param, negate, exp, add};
- module_sequence.emplace(computation, order);
- auto liveness = BufferLiveness::Run(module.get(),
- absl::make_unique<SequentialHloOrdering>(
- module.get(), module_sequence))
- .ConsumeValueOrDie();
+ HloSchedule schedule(module.get());
+ schedule.set_sequence(computation, {param, negate, exp, add});
+ auto liveness =
+ BufferLiveness::Run(module.get(),
+ absl::make_unique<SequentialHloOrdering>(schedule))
+ .ConsumeValueOrDie();
EXPECT_TRUE(InstructionsMayInterfere(*liveness, param, negate));
EXPECT_FALSE(InstructionsMayInterfere(*liveness, param, exp));
@@ -339,14 +339,14 @@ TEST_F(BufferLivenessTest, RootInstructionIsNotLastInSequentialOrder) {
auto module = CreateNewModule();
auto computation = module->AddEntryComputation(builder.Build(add));
- SequentialHloOrdering::HloModuleSequence module_sequence;
- std::vector<const HloInstruction*> order = {param, add, recv,
- recv_done, send, send_done};
- module_sequence.emplace(computation, order);
- auto liveness = BufferLiveness::Run(module.get(),
- absl::make_unique<SequentialHloOrdering>(
- module.get(), module_sequence))
- .ConsumeValueOrDie();
+ HloSchedule schedule(module.get());
+ schedule.set_sequence(computation,
+ {param, add, token, recv, recv_done, send, send_done});
+ TF_ASSERT_OK(schedule.Verify());
+ auto liveness =
+ BufferLiveness::Run(module.get(),
+ absl::make_unique<SequentialHloOrdering>(schedule))
+ .ConsumeValueOrDie();
EXPECT_FALSE(InstructionsMayInterfere(*liveness, param, add));
// Check the root instruction (add) buffer interferes with the recv buffer.
diff --git a/tensorflow/compiler/xla/service/cpu/conv_canonicalization_test.cc b/tensorflow/compiler/xla/service/cpu/conv_canonicalization_test.cc
index 616c453750..05792795a1 100644
--- a/tensorflow/compiler/xla/service/cpu/conv_canonicalization_test.cc
+++ b/tensorflow/compiler/xla/service/cpu/conv_canonicalization_test.cc
@@ -56,13 +56,6 @@ class ConvCanonicalizationTest : public HloTestBase {
static constexpr int kOutputFeatureCount = 64;
};
-PrecisionConfigProto DefaultPrecisionConfig(int operands) {
- PrecisionConfigProto precision_config;
- precision_config.mutable_operand_precision()->Resize(
- operands, PrecisionConfigProto::DEFAULT);
- return precision_config;
-}
-
TEST_F(ConvCanonicalizationTest, NonCanonicalToCanonical) {
auto builder = HloComputation::Builder(TestName());
// The input dimensions are in CNHW order.
diff --git a/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc b/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc
index 796f36510e..e7b6075994 100644
--- a/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc
+++ b/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc
@@ -584,16 +584,14 @@ StatusOr<std::unique_ptr<Executable>> CpuCompiler::RunBackend(
// computation. Using this sequence enables tighter buffer liveness analysis
// and reduced memory usage (as compared to using DependencyHloOrdering).
TF_ASSIGN_OR_RETURN(
- SequentialHloOrdering::HloModuleSequence module_sequence,
- ScheduleComputationsInModule(*module, BufferSizeBytesFunction(),
- DFSMemoryScheduler));
+ HloSchedule schedule,
+ ScheduleModule(*module, BufferSizeBytesFunction(), DFSMemoryScheduler));
// Run buffer allocation on the HLO graph.
TF_ASSIGN_OR_RETURN(
std::unique_ptr<BufferAssignment> assignment,
BufferAssigner::Run(module.get(),
- absl::make_unique<SequentialHloOrdering>(
- module.get(), module_sequence),
+ absl::make_unique<SequentialHloOrdering>(schedule),
BufferSizeBytesFunction(), memory_alignment,
/*allow_input_output_aliasing=*/false,
/*allocate_buffers_for_constants=*/true));
@@ -627,9 +625,10 @@ StatusOr<std::unique_ptr<Executable>> CpuCompiler::RunBackend(
}
TF_RETURN_IF_ERROR(
ir_emitter
- .EmitComputation(embedded_computation, embedded_computation->name(),
- /*is_top_level_computation=*/false,
- &module_sequence.at(embedded_computation))
+ .EmitComputation(
+ embedded_computation, embedded_computation->name(),
+ /*is_top_level_computation=*/false,
+ &schedule.sequence(embedded_computation).instructions())
.status());
}
string function_name_prefix = entry_computation->name().empty()
@@ -637,9 +636,10 @@ StatusOr<std::unique_ptr<Executable>> CpuCompiler::RunBackend(
: entry_computation->name();
TF_ASSIGN_OR_RETURN(
llvm::Function * entry_function,
- ir_emitter.EmitComputation(entry_computation, function_name_prefix,
- /*is_top_level_computation=*/true,
- &module_sequence.at(entry_computation)));
+ ir_emitter.EmitComputation(
+ entry_computation, function_name_prefix,
+ /*is_top_level_computation=*/true,
+ &schedule.sequence(entry_computation).instructions()));
string function_name = [&]() {
llvm::SmallVector<char, 40> function_name_vector;
@@ -771,20 +771,18 @@ CpuCompiler::CompileAheadOfTime(std::vector<std::unique_ptr<HloModule>> modules,
VLOG(2) << "After optimization:";
XLA_VLOG_LINES(2, module->ToString());
- TF_ASSIGN_OR_RETURN(
- SequentialHloOrdering::HloModuleSequence module_sequence,
- ScheduleComputationsInModule(*module, BufferSizeBytesFunction()));
+ TF_ASSIGN_OR_RETURN(HloSchedule schedule,
+ ScheduleModule(*module, BufferSizeBytesFunction()));
// Run buffer analysis on the HLO graph. This analysis figures out which
// temporary buffers are required to run the computation.
TF_ASSIGN_OR_RETURN(
std::unique_ptr<BufferAssignment> assignment,
- BufferAssigner::Run(
- module,
- absl::make_unique<SequentialHloOrdering>(module, module_sequence),
- BufferSizeBytesFunction(), memory_alignment,
- /*allow_input_output_aliasing=*/false,
- /*allocate_buffers_for_constants=*/true));
+ BufferAssigner::Run(module,
+ absl::make_unique<SequentialHloOrdering>(schedule),
+ BufferSizeBytesFunction(), memory_alignment,
+ /*allow_input_output_aliasing=*/false,
+ /*allocate_buffers_for_constants=*/true));
// BufferAssignment::ToString() includes a header, so no need for us to
// print one ourselves.
XLA_VLOG_LINES(2, assignment->ToString());
@@ -824,18 +822,18 @@ CpuCompiler::CompileAheadOfTime(std::vector<std::unique_ptr<HloModule>> modules,
}
TF_RETURN_IF_ERROR(
ir_emitter
- .EmitComputation(embedded_computation,
- embedded_computation->name(),
- /*is_top_level_computation=*/false,
- &module_sequence.at(embedded_computation))
+ .EmitComputation(
+ embedded_computation, embedded_computation->name(),
+ /*is_top_level_computation=*/false,
+ &schedule.sequence(embedded_computation).instructions())
.status());
}
const string& entry_point_name = options.entry_point_name();
- TF_ASSIGN_OR_RETURN(
- llvm::Function * entry_function,
- ir_emitter.EmitComputation(computation, entry_point_name,
- /*is_top_level_computation=*/true,
- &module_sequence.at(computation)));
+ TF_ASSIGN_OR_RETURN(llvm::Function * entry_function,
+ ir_emitter.EmitComputation(
+ computation, entry_point_name,
+ /*is_top_level_computation=*/true,
+ &schedule.sequence(computation).instructions()));
CHECK(entry_function->getName() == llvm_ir::AsStringRef(entry_point_name));
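ScheduleModule returns an HloSchedule for the whole module, and the per-computation orders consumed by the IR emitter are reached through sequence().instructions(). A sketch of that loop under the same assumptions as the hunk (fusion computations carry no sequence and are skipped; assumes a StatusOr-returning context for TF_ASSIGN_OR_RETURN):

TF_ASSIGN_OR_RETURN(HloSchedule schedule,
                    ScheduleModule(*module, BufferSizeBytesFunction()));
for (HloComputation* computation : module->MakeComputationPostOrder()) {
  if (computation->IsFusionComputation()) {
    continue;  // emitted as part of their fusion instruction, not scheduled
  }
  const std::vector<const HloInstruction*>& order =
      schedule.sequence(computation).instructions();
  // ... emit IR for `computation` following `order` ...
}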
diff --git a/tensorflow/compiler/xla/service/cpu/cpu_instruction_fusion_test.cc b/tensorflow/compiler/xla/service/cpu/cpu_instruction_fusion_test.cc
index 6bd0a2dd90..0fea462c85 100644
--- a/tensorflow/compiler/xla/service/cpu/cpu_instruction_fusion_test.cc
+++ b/tensorflow/compiler/xla/service/cpu/cpu_instruction_fusion_test.cc
@@ -38,9 +38,9 @@ std::unique_ptr<HloInstruction> MakeDot(const Shape& shape, HloInstruction* lhs,
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
- PrecisionConfigProto precision_config;
+ PrecisionConfig precision_config;
precision_config.mutable_operand_precision()->Resize(
- 2, PrecisionConfigProto::DEFAULT);
+ 2, PrecisionConfig::DEFAULT);
return HloInstruction::CreateDot(shape, lhs, rhs, dot_dnums,
precision_config);
}
diff --git a/tensorflow/compiler/xla/service/cpu/ir_emitter.cc b/tensorflow/compiler/xla/service/cpu/ir_emitter.cc
index e5cf15c686..df8c2a636b 100644
--- a/tensorflow/compiler/xla/service/cpu/ir_emitter.cc
+++ b/tensorflow/compiler/xla/service/cpu/ir_emitter.cc
@@ -110,7 +110,7 @@ IrEmitter::IrEmitter(
StatusOr<llvm::Function*> IrEmitter::EmitComputation(
HloComputation* computation, const string& function_name_prefix,
bool is_top_level_computation,
- std::vector<const HloInstruction*>* instruction_order) {
+ const std::vector<const HloInstruction*>* instruction_order) {
string function_name = name_uniquer_.GetUniqueName(function_name_prefix);
VLOG(2) << "Emitting IR for CPU function [" << function_name_prefix
<< "]; ordered? " << (instruction_order != nullptr);
diff --git a/tensorflow/compiler/xla/service/cpu/ir_emitter.h b/tensorflow/compiler/xla/service/cpu/ir_emitter.h
index 58a333b8fb..3df99464ba 100644
--- a/tensorflow/compiler/xla/service/cpu/ir_emitter.h
+++ b/tensorflow/compiler/xla/service/cpu/ir_emitter.h
@@ -98,7 +98,7 @@ class IrEmitter : public DfsHloVisitorWithDefault,
StatusOr<llvm::Function*> EmitComputation(
HloComputation* computation, const string& function_name_prefix,
bool is_top_level_computation,
- std::vector<const HloInstruction*>* instruction_order);
+ const std::vector<const HloInstruction*>* instruction_order);
llvm::IRBuilder<>* b() { return &b_; }
diff --git a/tensorflow/compiler/xla/service/gpu/BUILD b/tensorflow/compiler/xla/service/gpu/BUILD
index a68b7a1bef..13ccff35f8 100644
--- a/tensorflow/compiler/xla/service/gpu/BUILD
+++ b/tensorflow/compiler/xla/service/gpu/BUILD
@@ -813,6 +813,7 @@ cc_library(
"//tensorflow/compiler/xla/service:hlo",
"//tensorflow/compiler/xla/service:hlo_ordering",
"//tensorflow/compiler/xla/service:hlo_reachability",
+ "//tensorflow/compiler/xla/service:hlo_schedule",
"//tensorflow/compiler/xla/service:hlo_scheduling",
"@com_google_absl//absl/memory",
],
diff --git a/tensorflow/compiler/xla/service/gpu/cudnn_convolution_rewriter_test.cc b/tensorflow/compiler/xla/service/gpu/cudnn_convolution_rewriter_test.cc
index 9b46bfc098..bda8ebe579 100644
--- a/tensorflow/compiler/xla/service/gpu/cudnn_convolution_rewriter_test.cc
+++ b/tensorflow/compiler/xla/service/gpu/cudnn_convolution_rewriter_test.cc
@@ -95,13 +95,6 @@ class CudnnConvolutionRewriterTest : public HloVerifiedTestBase {
ConvolutionDimensionNumbers tf_default_dnums_for_backward_input_;
};
-PrecisionConfigProto DefaultPrecisionConfig(int operands) {
- PrecisionConfigProto precision_config;
- precision_config.mutable_operand_precision()->Resize(
- operands, PrecisionConfigProto::DEFAULT);
- return precision_config;
-}
-
TEST_F(CudnnConvolutionRewriterTest, BackwardFilterConvolve) {
HloComputation::Builder builder(TestName());
HloInstruction* activations =
diff --git a/tensorflow/compiler/xla/service/gpu/gpu_hlo_schedule.cc b/tensorflow/compiler/xla/service/gpu/gpu_hlo_schedule.cc
index 743035a84e..ea9376e101 100644
--- a/tensorflow/compiler/xla/service/gpu/gpu_hlo_schedule.cc
+++ b/tensorflow/compiler/xla/service/gpu/gpu_hlo_schedule.cc
@@ -22,6 +22,7 @@ limitations under the License.
#include "absl/memory/memory.h"
#include "tensorflow/compiler/xla/service/buffer_value.h"
#include "tensorflow/compiler/xla/service/hlo_reachability.h"
+#include "tensorflow/compiler/xla/service/hlo_schedule.h"
#include "tensorflow/compiler/xla/service/hlo_scheduling.h"
#include "tensorflow/compiler/xla/types.h"
@@ -198,11 +199,12 @@ StatusOr<std::unique_ptr<GpuHloSchedule>> GpuHloSchedule::Build(
// All kernels are launched on a single stream, so there's no loss of
// concurrency by optimizing for minimal memory usage.
TF_ASSIGN_OR_RETURN(
- schedule->thunk_launch_order_,
- ScheduleOneComputation(
+ HloInstructionSequence sequence,
+ ScheduleComputation(
*entry_computation, [pointer_size](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape(), pointer_size);
}));
+ schedule->thunk_launch_order_ = sequence.instructions();
} else {
// BFS tends to increase concurrency, but also increases memory usage.
BFSLaunchOrder(entry_computation, &schedule->thunk_launch_order_);
diff --git a/tensorflow/compiler/xla/service/gpu/gpu_hlo_schedule.h b/tensorflow/compiler/xla/service/gpu/gpu_hlo_schedule.h
index 30a0e7cecd..07a7fc67aa 100644
--- a/tensorflow/compiler/xla/service/gpu/gpu_hlo_schedule.h
+++ b/tensorflow/compiler/xla/service/gpu/gpu_hlo_schedule.h
@@ -33,7 +33,9 @@ namespace gpu {
// launches, because thunks may be scheduled onto concurrent streams. This
// schedule is used by BufferAssigner to determine buffer liveness (i.e. to
// minimize allocations), and also by ThunkSchedule to determine the thunk
-// launch order.
+// launch order. This class differs from xla::HloSchedule in that HloSchedule
+// represents a total order of all instructions in the module for backends which
+// execute HLO instructions strictly sequentially.
class GpuHloSchedule {
public:
  // Constructs a GpuHloSchedule for the given module, based on the given
diff --git a/tensorflow/compiler/xla/service/gpu/multi_output_fusion_test.cc b/tensorflow/compiler/xla/service/gpu/multi_output_fusion_test.cc
index c822c94f1b..8a6e5327e0 100644
--- a/tensorflow/compiler/xla/service/gpu/multi_output_fusion_test.cc
+++ b/tensorflow/compiler/xla/service/gpu/multi_output_fusion_test.cc
@@ -259,7 +259,7 @@ TEST_F(MultiOutputFusionTest, MultiOutputFusionTwoLoops) {
TEST_F(MultiOutputFusionTest, MultiOutputFusionLoopReduceToInputFusion) {
// Fusing a reduce into a loop fusion would require changing the fusion kind.
// That's not supported yet.
- auto module = ParseHloString(tensorflow::strings::StrCat(kModulePrefix, R"(
+ auto module = ParseHloString(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[6400]{0} parameter(0)
ROOT mul = f32[6400]{0} multiply(p0.1, p0.1)
@@ -277,7 +277,7 @@ TEST_F(MultiOutputFusionTest, MultiOutputFusionLoopReduceToInputFusion) {
}
TEST_F(MultiOutputFusionTest, MultiOutputFusionLoopElementwise) {
- auto module = ParseHloString(tensorflow::strings::StrCat(kModulePrefix, R"(
+ auto module = ParseHloString(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[6400]{0} parameter(0)
ROOT mul = f32[6400]{0} multiply(p0.1, p0.1)
@@ -301,7 +301,7 @@ TEST_F(MultiOutputFusionTest, MultiOutputFusionLoopElementwise) {
}
TEST_F(MultiOutputFusionTest, MultiOutputFusionSiblingLoopsDifferentShapes) {
- auto module = ParseHloString(tensorflow::strings::StrCat(kModulePrefix, R"(
+ auto module = ParseHloString(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0)
ROOT mul = f32[8,1,5,16,1,1]{5,4,3,2,1,0} multiply(p0.1, p0.1)
@@ -324,7 +324,7 @@ TEST_F(MultiOutputFusionTest, MultiOutputFusionSiblingLoopsDifferentShapes) {
}
TEST_F(MultiOutputFusionTest, MultiOutputFusionSiblingLoopAndMultiOutputLoop) {
- auto module = ParseHloString(tensorflow::strings::StrCat(kModulePrefix, R"(
+ auto module = ParseHloString(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0)
mul = f32[8,1,5,16,1,1]{5,4,3,2,1,0} multiply(p0.1, p0.1)
@@ -358,7 +358,7 @@ TEST_F(MultiOutputFusionTest, MultiOutputFusionSiblingLoopAndMultiOutputLoop) {
TEST_F(MultiOutputFusionTest,
MultiOutputFusionSiblingLoopAndMultiOutputLoopDifferentShapes) {
- auto module = ParseHloString(tensorflow::strings::StrCat(kModulePrefix, R"(
+ auto module = ParseHloString(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0)
mul = f32[8,1,5,16,1,1]{5,4,3,2,1,0} multiply(p0.1, p0.1)
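These test hunks also swap tensorflow::strings::StrCat for absl::StrCat when assembling HLO text; usage is unchanged apart from the header. A small sketch (the HLO body here is illustrative, and kModulePrefix is assumed to be the fixture's module header string, as in the tests above):

#include "absl/strings/str_cat.h"

auto module = ParseHloString(absl::StrCat(kModulePrefix, R"(
  ENTRY entry {
    p0 = f32[128]{0} parameter(0)
    ROOT mul = f32[128]{0} multiply(p0, p0)
  })"));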
diff --git a/tensorflow/compiler/xla/service/graphviz_example.cc b/tensorflow/compiler/xla/service/graphviz_example.cc
index 0a49d85c6d..ef70b68877 100644
--- a/tensorflow/compiler/xla/service/graphviz_example.cc
+++ b/tensorflow/compiler/xla/service/graphviz_example.cc
@@ -112,9 +112,9 @@ std::unique_ptr<HloModule> MakeBigGraph() {
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
- PrecisionConfigProto precision_config;
+ PrecisionConfig precision_config;
precision_config.mutable_operand_precision()->Resize(
- /*new_size=*/2, PrecisionConfigProto::DEFAULT);
+ /*new_size=*/2, PrecisionConfig::DEFAULT);
auto dot = builder.AddInstruction(HloInstruction::CreateDot(
vshape, clamp, param_v0, dot_dnums, precision_config));
auto tuple = builder.AddInstruction(
diff --git a/tensorflow/compiler/xla/service/heap_simulator.cc b/tensorflow/compiler/xla/service/heap_simulator.cc
index 38c3982ebf..e0f3a7e0e2 100644
--- a/tensorflow/compiler/xla/service/heap_simulator.cc
+++ b/tensorflow/compiler/xla/service/heap_simulator.cc
@@ -29,13 +29,13 @@ using tensorflow::gtl::FlatSet;
/*static*/
StatusOr<int64> HeapSimulator::MinimumMemoryForModule(
- const SequentialHloOrdering::HloModuleSequence& module_sequence,
+ const HloSchedule& schedule,
const LogicalBuffer::SizeFunction& size_function) {
- if (module_sequence.empty()) {
+ if (schedule.empty()) {
return 0;
}
- const HloModule* module = module_sequence.begin()->first->parent();
+ const HloModule* module = schedule.module();
TF_ASSIGN_OR_RETURN(std::unique_ptr<TuplePointsToAnalysis> points_to_analysis,
TuplePointsToAnalysis::Run(module));
@@ -47,14 +47,13 @@ StatusOr<int64> HeapSimulator::MinimumMemoryForModule(
TF_ASSIGN_OR_RETURN(
HeapSimulator::Result result,
HeapSimulator::Run(absl::make_unique<NoFragmentationStatsHeap>(), *module,
- module_sequence, *points_to_analysis, size_function));
+ schedule, *points_to_analysis, size_function));
return result.heap_size;
}
/*static*/
StatusOr<int64> HeapSimulator::MinimumMemoryForComputation(
- const HloComputation& computation,
- const std::vector<const HloInstruction*>& sequence,
+ const HloComputation& computation, const HloInstructionSequence& sequence,
const TuplePointsToAnalysis& points_to_analysis,
const LogicalBuffer::SizeFunction& size_function,
const tensorflow::gtl::FlatMap<const HloComputation*, int64>*
@@ -71,13 +70,13 @@ StatusOr<int64> HeapSimulator::MinimumMemoryForComputation(
/*static*/
StatusOr<HeapSimulator::Result> HeapSimulator::Run(
std::unique_ptr<HeapAlgorithm> algorithm, const HloModule& module,
- const SequentialHloOrdering::HloModuleSequence& module_sequence,
+ const HloSchedule& schedule,
const TuplePointsToAnalysis& points_to_analysis,
const BufferValue::SizeFunction& size_fn, const Options& options) {
- HeapSimulator heap(std::move(algorithm), size_fn, options, &module_sequence);
+ HeapSimulator heap(std::move(algorithm), size_fn, options, &schedule);
const HloComputation* entry_computation = module.entry_computation();
- const std::vector<const HloInstruction*>& instruction_sequence =
- FindOrDie(module_sequence, entry_computation);
+ const HloInstructionSequence& instruction_sequence =
+ schedule.sequence(entry_computation);
TF_RETURN_IF_ERROR(heap.RunComputation(
*entry_computation, instruction_sequence, points_to_analysis));
return heap.Finish();
@@ -86,13 +85,13 @@ StatusOr<HeapSimulator::Result> HeapSimulator::Run(
/*static*/
StatusOr<HeapSimulator::Result> HeapSimulator::Run(
std::unique_ptr<HeapAlgorithm> algorithm, const HloComputation& computation,
- const std::vector<const HloInstruction*>& instruction_sequence,
+ const HloInstructionSequence& instruction_sequence,
const TuplePointsToAnalysis& points_to_analysis,
const BufferValue::SizeFunction& size_fn, const Options& options,
const tensorflow::gtl::FlatMap<const HloComputation*, int64>*
memory_by_computation) {
HeapSimulator heap(std::move(algorithm), size_fn, options,
- /*module_sequence=*/nullptr, memory_by_computation);
+ /*schedule=*/nullptr, memory_by_computation);
TF_RETURN_IF_ERROR(heap.RunComputation(computation, instruction_sequence,
points_to_analysis));
return heap.Finish();
@@ -102,7 +101,7 @@ StatusOr<HeapSimulator::Result> HeapSimulator::Run(
// 'instruction_sequence'.
Status HeapSimulator::RunComputation(
const HloComputation& computation,
- const std::vector<const HloInstruction*>& instruction_sequence,
+ const HloInstructionSequence& instruction_sequence,
const TuplePointsToAnalysis& points_to_analysis) {
VLOG(3) << "Computation:\n" << computation.ToString();
// The goal here is to minimize memory usage, assuming the given sequential
@@ -133,7 +132,8 @@ Status HeapSimulator::RunComputation(
// set of instructions that need to be visited contains all users of all
// aliases, that is, all users of all instructions that have the buffer
// contained in their points-to set.
- for (const HloInstruction* instruction : instruction_sequence) {
+ for (const HloInstruction* instruction :
+ instruction_sequence.instructions()) {
const PointsToSet& points_to =
points_to_analysis.GetPointsToSet(instruction);
const PointsToSet::BufferSet& buffer_set = points_to.CreateFlattenedSet();
@@ -166,7 +166,8 @@ Status HeapSimulator::RunComputation(
std::vector<const BufferValue*> dead_buffers_to_free;
std::vector<const BufferValue*> operand_buffers_to_free;
- for (const HloInstruction* instruction : instruction_sequence) {
+ for (const HloInstruction* instruction :
+ instruction_sequence.instructions()) {
const TuplePointsToAnalysis::BufferDefinitionVector&
buffers_defined_by_instruction =
points_to_analysis.GetBuffersDefinedByInstruction(instruction);
@@ -285,14 +286,14 @@ Status HeapSimulator::RunComputation(
// The order that the sub-computations are simulated does not affect
// correctness; since the whole module has been scheduled, we know that the
// sub-computations will never be run concurrently.
- if (module_sequence_ != nullptr) {
+ if (schedule_ != nullptr) {
if (instruction->opcode() == HloOpcode::kCall ||
instruction->opcode() == HloOpcode::kConditional ||
instruction->opcode() == HloOpcode::kWhile) {
for (const HloComputation* called_computation :
instruction->called_computations()) {
- const std::vector<const HloInstruction*>& called_sequence =
- FindOrDie(*module_sequence_, called_computation);
+ const HloInstructionSequence& called_sequence =
+ schedule_->sequence(called_computation);
TF_RETURN_IF_ERROR(RunComputation(
*called_computation, called_sequence, points_to_analysis));
}
@@ -343,16 +344,16 @@ Status HeapSimulator::RunComputation(
HeapSimulator::HeapSimulator(
std::unique_ptr<HeapAlgorithm> algorithm,
const BufferValue::SizeFunction& size_fn, const Options& options,
- const SequentialHloOrdering::HloModuleSequence* module_sequence,
+ const HloSchedule* schedule,
const tensorflow::gtl::FlatMap<const HloComputation*, int64>*
memory_by_computation)
: no_fragmentation_stats_(absl::make_unique<NoFragmentationStatsHeap>()),
algorithm_(std::move(algorithm)),
size_fn_(size_fn),
options_(options),
- module_sequence_(module_sequence),
+ schedule_(schedule),
memory_by_computation_(memory_by_computation) {
- debug_trace_.set_whole_module_simulation(module_sequence_ != nullptr);
+ debug_trace_.set_whole_module_simulation(schedule_ != nullptr);
}
HeapSimulator::~HeapSimulator() {}
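With these signatures, whole-module heap simulation is driven by an HloSchedule and per-computation simulation by an HloInstructionSequence. A short sketch of the module-level entry point, assuming a schedule already in scope and an illustrative size function:

auto size_fn = [](const BufferValue& buffer) {
  return ShapeUtil::ByteSizeOf(buffer.shape(), /*pointer_size=*/8);
};
TF_ASSIGN_OR_RETURN(
    int64 min_bytes,
    HeapSimulator::MinimumMemoryForModule(schedule, size_fn));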
diff --git a/tensorflow/compiler/xla/service/heap_simulator.h b/tensorflow/compiler/xla/service/heap_simulator.h
index af05bedee7..ffbf947d5a 100644
--- a/tensorflow/compiler/xla/service/heap_simulator.h
+++ b/tensorflow/compiler/xla/service/heap_simulator.h
@@ -27,6 +27,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_ordering.h"
+#include "tensorflow/compiler/xla/service/hlo_schedule.h"
#include "tensorflow/compiler/xla/service/tuple_points_to_analysis.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/core/lib/gtl/flatmap.h"
@@ -88,23 +89,22 @@ class HeapSimulator {
// Returns the minimum memory required to compute an HLO module where all
// computations have been scheduled (represented by the given
- // module_sequence), assuming no fragmentation.
+ // schedule), assuming no fragmentation.
static StatusOr<int64> MinimumMemoryForModule(
- const SequentialHloOrdering::HloModuleSequence& module_sequence,
+ const HloSchedule& schedule,
const LogicalBuffer::SizeFunction& size_function);
// Returns the minimum memory required to compute the given computation,
// assuming no fragmentation.
static StatusOr<int64> MinimumMemoryForComputation(
- const HloComputation& computation,
- const std::vector<const HloInstruction*>& sequence,
+ const HloComputation& computation, const HloInstructionSequence& sequence,
const TuplePointsToAnalysis& points_to_analysis,
const LogicalBuffer::SizeFunction& size_function,
const tensorflow::gtl::FlatMap<const HloComputation*, int64>*
memory_by_computation = nullptr);
// Run the heap simulation with the given algorithm, assuming the given
- // module_sequence, which must contain a topologically-consistent total
+ // schedule, which must contain a topologically-consistent total
// ordering of all instructions within each computation. The result is invalid
// if instructions are not run in exactly this sequence.
//
@@ -112,12 +112,12 @@ class HeapSimulator {
// to running on a per-computation basis, since we can re-use buffer space for
// called sub-computations.
//
- static StatusOr<Result> Run(
- std::unique_ptr<HeapAlgorithm> algorithm, const HloModule& module,
- const SequentialHloOrdering::HloModuleSequence& module_sequence,
- const TuplePointsToAnalysis& points_to_analysis,
- const BufferValue::SizeFunction& size_fn,
- const Options& options = Options());
+ static StatusOr<Result> Run(std::unique_ptr<HeapAlgorithm> algorithm,
+ const HloModule& module,
+ const HloSchedule& schedule,
+ const TuplePointsToAnalysis& points_to_analysis,
+ const BufferValue::SizeFunction& size_fn,
+ const Options& options = Options());
// Same as above, but runs on a single computation. The 'instruction_sequence'
// must contain a topologically-consistent total ordering of all instructions
@@ -126,7 +126,7 @@ class HeapSimulator {
static StatusOr<Result> Run(
std::unique_ptr<HeapAlgorithm> algorithm,
const HloComputation& computation,
- const std::vector<const HloInstruction*>& instruction_sequence,
+ const HloInstructionSequence& instruction_sequence,
const TuplePointsToAnalysis& points_to_analysis,
const BufferValue::SizeFunction& size_fn,
const Options& options = Options(),
@@ -134,21 +134,19 @@ class HeapSimulator {
memory_by_computation = nullptr);
private:
- // If 'module_sequence' is non-null, it is used to find kCall and kWhile
+ // If 'schedule' is non-null, it is used to find kCall and kWhile
// sub-computations, and the heap simulation for those sub-computations will
// be run recursively. I.e. the simulation is run over the whole module.
- HeapSimulator(
- std::unique_ptr<HeapAlgorithm> algorithm,
- const BufferValue::SizeFunction& size_fn, const Options& options,
- const SequentialHloOrdering::HloModuleSequence* module_sequence = nullptr,
- const tensorflow::gtl::FlatMap<const HloComputation*, int64>*
- memory_by_computation = nullptr);
+ HeapSimulator(std::unique_ptr<HeapAlgorithm> algorithm,
+ const BufferValue::SizeFunction& size_fn,
+ const Options& options, const HloSchedule* schedule = nullptr,
+ const tensorflow::gtl::FlatMap<const HloComputation*, int64>*
+ memory_by_computation = nullptr);
~HeapSimulator();
- Status RunComputation(
- const HloComputation& computation,
- const std::vector<const HloInstruction*>& instruction_sequence,
- const TuplePointsToAnalysis& points_to_analysis);
+ Status RunComputation(const HloComputation& computation,
+ const HloInstructionSequence& instruction_sequence,
+ const TuplePointsToAnalysis& points_to_analysis);
bool IgnoreBuffer(const BufferValue* buffer) const;
void Alloc(const BufferValue* buffer, const HloInstruction* instruction);
@@ -169,11 +167,11 @@ class HeapSimulator {
const std::unique_ptr<HeapAlgorithm> algorithm_;
const BufferValue::SizeFunction size_fn_;
const Options options_;
- // module_sequence_ is set by buffer assignment, and memory_by_computation_ is
+ // schedule_ is set by buffer assignment, and memory_by_computation_ is
// set by hlo scheduling. Then, in RunComputation, we check both in order to
// handle subcomputations. It would be good to unify the handling of
// subcomputations, but it's not clear how.
- const SequentialHloOrdering::HloModuleSequence* module_sequence_;
+ const HloSchedule* schedule_;
const tensorflow::gtl::FlatMap<const HloComputation*, int64>*
memory_by_computation_;
diff --git a/tensorflow/compiler/xla/service/heap_simulator_test.cc b/tensorflow/compiler/xla/service/heap_simulator_test.cc
index 576c5ff7a4..00a25db467 100644
--- a/tensorflow/compiler/xla/service/heap_simulator_test.cc
+++ b/tensorflow/compiler/xla/service/heap_simulator_test.cc
@@ -30,6 +30,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/tuple_points_to_analysis.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/tests/hlo_test_base.h"
+#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/gtl/flatmap.h"
namespace xla {
@@ -85,13 +86,16 @@ TEST_F(MinimumMemoryForSequenceTest, MultiComputation) {
return ShapeUtil::ByteSizeOf(buffer.shape(), /*pointer_size=*/8);
};
- SequentialHloOrdering::HloModuleSequence module_sequence;
- module_sequence[cond_computation] = {cond_param, cond_iter, cond_data,
- cond_lt};
- module_sequence[body_computation] = {body_param};
- module_sequence[entry_computation] = {iter, data, tuple, while_op};
- EXPECT_EQ(56, HeapSimulator::MinimumMemoryForModule(module_sequence, size_fn)
- .ValueOrDie());
+ HloSchedule schedule(module.get());
+ schedule.set_sequence(cond_computation,
+ {cond_param, cond_iter, cond_data, cond_lt});
+ schedule.set_sequence(body_computation, {body_param});
+ schedule.set_sequence(entry_computation, {iter, data, tuple, while_op});
+ TF_ASSERT_OK(schedule.Verify());
+
+ EXPECT_EQ(
+ 56,
+ HeapSimulator::MinimumMemoryForModule(schedule, size_fn).ValueOrDie());
}
const char kAlloc[] = "Alloc";
@@ -149,10 +153,11 @@ class HeapSimulatorTracker {
auto zero_size = [](const BufferValue& buffer) { return 0; };
auto algorithm = absl::make_unique<DecreasingSizeRunsHeap>(
absl::make_unique<HeapCallRecorder>(&actual_calls_));
- result_ = HeapSimulator::Run(
- std::move(algorithm), *module_->entry_computation(),
- instruction_sequence, *points_to_analysis_, zero_size)
- .ConsumeValueOrDie();
+ result_ =
+ HeapSimulator::Run(std::move(algorithm), *module_->entry_computation(),
+ HloInstructionSequence(instruction_sequence),
+ *points_to_analysis_, zero_size)
+ .ConsumeValueOrDie();
}
explicit HeapSimulatorTracker(const string& name) {
@@ -168,11 +173,12 @@ class HeapSimulatorTracker {
TuplePointsToAnalysis::Run(module_.get()).ConsumeValueOrDie();
// Construct the module sequence grouped by computation.
- SequentialHloOrdering::HloModuleSequence module_sequence;
+ HloSchedule schedule(module_.get());
tensorflow::gtl::FlatMap<const HloInstruction*, int> reverse_position;
for (int i = 0; i < full_module_sequence.size(); ++i) {
const HloInstruction* instruction = full_module_sequence[i];
- module_sequence[instruction->parent()].push_back(instruction);
+ schedule.GetOrCreateSequence(instruction->parent())
+ .push_back(instruction);
reverse_position[instruction] = full_module_sequence.size() - i;
}
@@ -185,8 +191,8 @@ class HeapSimulatorTracker {
};
auto algorithm = absl::make_unique<DecreasingSizeRunsHeap>(
absl::make_unique<HeapCallRecorder>(&actual_calls_));
- result_ = HeapSimulator::Run(std::move(algorithm), *module_,
- module_sequence, *points_to_analysis_, size_fn)
+ result_ = HeapSimulator::Run(std::move(algorithm), *module_, schedule,
+ *points_to_analysis_, size_fn)
.ConsumeValueOrDie();
}
@@ -353,13 +359,6 @@ TEST_F(HeapSimulatorTest, BufferReusedOnce) {
(neg_buffer == output_buffer_1));
}
-PrecisionConfigProto DefaultPrecisionConfig(int operands) {
- PrecisionConfigProto precision_config;
- precision_config.mutable_operand_precision()->Resize(
- operands, PrecisionConfigProto::DEFAULT);
- return precision_config;
-}
-
TEST_F(HeapSimulatorTest, MultiplyDot) {
auto builder = HloComputation::Builder(TestName());
auto paramA = builder.AddInstruction(
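Besides set_sequence, the tracker above builds its schedule incrementally with GetOrCreateSequence, which appends to a (possibly newly created) per-computation sequence. A sketch of that pattern, where full_module_sequence stands in for the flat instruction list used by the test:

HloSchedule schedule(module.get());
for (const HloInstruction* instruction : full_module_sequence) {
  schedule.GetOrCreateSequence(instruction->parent()).push_back(instruction);
}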
diff --git a/tensorflow/compiler/xla/service/hlo.proto b/tensorflow/compiler/xla/service/hlo.proto
index 58b7af93eb..99d0cf50ca 100644
--- a/tensorflow/compiler/xla/service/hlo.proto
+++ b/tensorflow/compiler/xla/service/hlo.proto
@@ -172,7 +172,7 @@ message HloInstructionProto {
xla.ScatterDimensionNumbers scatter_dimension_numbers = 48;
// Precision configuration for the instruction. Has backend-specific meaning.
- xla.PrecisionConfigProto precision_config = 51;
+ xla.PrecisionConfig precision_config = 51;
// Collective permute field.
repeated SourceTarget source_target_pairs = 52;
diff --git a/tensorflow/compiler/xla/service/hlo_alias_analysis_test.cc b/tensorflow/compiler/xla/service/hlo_alias_analysis_test.cc
index 54abe3345d..0cd0ab36fc 100644
--- a/tensorflow/compiler/xla/service/hlo_alias_analysis_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_alias_analysis_test.cc
@@ -885,18 +885,20 @@ TEST_F(HloAliasAnalysisTest, WhileInterference) {
// For a sequential order, there is interference iff the negate is after
// the while.
- SequentialHloOrdering::HloModuleSequence sequence;
- sequence[body] = {body_param, body_root};
- sequence[condition] = {cond_param, cond_root};
+ HloSchedule schedule(module_);
+ schedule.set_sequence(body, {body_param, body_root});
+ schedule.set_sequence(condition, {cond_param, cond_root});
{
- sequence[entry] = {init, xla_while, negate, entry_root};
- SequentialHloOrdering ordering(module_, sequence);
+ schedule.set_sequence(entry, {init, xla_while, negate, entry_root});
+ TF_ASSERT_OK(schedule.Verify());
+ SequentialHloOrdering ordering(schedule);
EXPECT_TRUE(analysis.HasLiveRangeInterference(ordering));
}
{
- sequence[entry] = {init, negate, xla_while, entry_root};
- SequentialHloOrdering ordering(module_, sequence);
+ schedule.set_sequence(entry, {init, negate, xla_while, entry_root});
+ TF_ASSERT_OK(schedule.Verify());
+ SequentialHloOrdering ordering(schedule);
EXPECT_FALSE(analysis.HasLiveRangeInterference(ordering));
}
}
diff --git a/tensorflow/compiler/xla/service/hlo_computation_test.cc b/tensorflow/compiler/xla/service/hlo_computation_test.cc
index a2c1ce34c6..2aaaef1d36 100644
--- a/tensorflow/compiler/xla/service/hlo_computation_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_computation_test.cc
@@ -601,9 +601,9 @@ TEST_F(HloComputationTest, Stringification) {
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
- PrecisionConfigProto precision_config;
+ PrecisionConfig precision_config;
precision_config.mutable_operand_precision()->Resize(
- 2, PrecisionConfigProto::DEFAULT);
+ 2, PrecisionConfig::DEFAULT);
builder.AddInstruction(
HloInstruction::CreateDot(sout, x, reshape, dot_dnums, precision_config));
auto module = CreateNewModule();
@@ -636,9 +636,9 @@ TEST_F(HloComputationTest, StringificationIndent) {
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
- PrecisionConfigProto precision_config;
+ PrecisionConfig precision_config;
precision_config.mutable_operand_precision()->Resize(
- 2, PrecisionConfigProto::DEFAULT);
+ 2, PrecisionConfig::DEFAULT);
builder.AddInstruction(
HloInstruction::CreateDot(sout, x, reshape, dot_dnums, precision_config));
auto module = CreateNewModule();
@@ -672,9 +672,9 @@ TEST_F(HloComputationTest, StringificationCanonical) {
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
- PrecisionConfigProto precision_config;
+ PrecisionConfig precision_config;
precision_config.mutable_operand_precision()->Resize(
- 2, PrecisionConfigProto::DEFAULT);
+ 2, PrecisionConfig::DEFAULT);
builder.AddInstruction(
HloInstruction::CreateDot(sout, x, reshape, dot_dnums, precision_config));
auto module = CreateNewModule();
diff --git a/tensorflow/compiler/xla/service/hlo_creation_utils.cc b/tensorflow/compiler/xla/service/hlo_creation_utils.cc
index a6ae0337a5..a3fcc0fefa 100644
--- a/tensorflow/compiler/xla/service/hlo_creation_utils.cc
+++ b/tensorflow/compiler/xla/service/hlo_creation_utils.cc
@@ -63,7 +63,7 @@ StatusOr<HloInstruction*> MakeSliceHlo(HloInstruction* operand,
StatusOr<HloInstruction*> MakeConvolveHlo(
HloInstruction* lhs, HloInstruction* rhs, int64 feature_group_count,
const Window& window, const ConvolutionDimensionNumbers& dimension_numbers,
- const PrecisionConfigProto& precision_config) {
+ const PrecisionConfig& precision_config) {
HloComputation* computation = lhs->parent();
CHECK_EQ(computation, rhs->parent());
TF_ASSIGN_OR_RETURN(Shape convolve_shape,
@@ -167,10 +167,9 @@ StatusOr<HloInstruction*> MakeConcatHlo(
HloInstruction::CreateConcatenate(concat_shape, operands, dimension));
}
-StatusOr<HloInstruction*> MakeDotHlo(
- HloInstruction* lhs, HloInstruction* rhs,
- const DotDimensionNumbers& dim_numbers,
- const PrecisionConfigProto& precision_config) {
+StatusOr<HloInstruction*> MakeDotHlo(HloInstruction* lhs, HloInstruction* rhs,
+ const DotDimensionNumbers& dim_numbers,
+ const PrecisionConfig& precision_config) {
HloComputation* computation = lhs->parent();
CHECK_EQ(computation, rhs->parent());
TF_ASSIGN_OR_RETURN(
diff --git a/tensorflow/compiler/xla/service/hlo_creation_utils.h b/tensorflow/compiler/xla/service/hlo_creation_utils.h
index 1c82956907..b22058abb4 100644
--- a/tensorflow/compiler/xla/service/hlo_creation_utils.h
+++ b/tensorflow/compiler/xla/service/hlo_creation_utils.h
@@ -50,7 +50,7 @@ StatusOr<HloInstruction*> MakeSliceHlo(HloInstruction* operand,
StatusOr<HloInstruction*> MakeConvolveHlo(
HloInstruction* lhs, HloInstruction* rhs, int64 feature_group_count,
const Window& window, const ConvolutionDimensionNumbers& dimension_numbers,
- const PrecisionConfigProto& precision_config);
+ const PrecisionConfig& precision_config);
// Creates a transpose HLO instruction and adds it to the computation containing
// `operand`.
@@ -98,10 +98,9 @@ StatusOr<HloInstruction*> MakeConcatHlo(
// Creates a Dot HLO instruction and adds it to the computation containing `lhs`
// and `rhs` (both must be in the same computation).
-StatusOr<HloInstruction*> MakeDotHlo(
- HloInstruction* lhs, HloInstruction* rhs,
- const DotDimensionNumbers& dim_numbers,
- const PrecisionConfigProto& precision_config);
+StatusOr<HloInstruction*> MakeDotHlo(HloInstruction* lhs, HloInstruction* rhs,
+ const DotDimensionNumbers& dim_numbers,
+ const PrecisionConfig& precision_config);
// Creates a Map HLO instruction and adds it to the computation containing the
// operands. All operands must be in the same computation.
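
Under the updated signatures, a dot built through the creation utilities supplies a PrecisionConfig directly. A hedged sketch; the helper name, the rank-2 contraction, and the lhs/rhs operands are assumptions of the example:

  // Assumed header: hlo_creation_utils.h.
  StatusOr<HloInstruction*> MakeDefaultPrecisionDot(HloInstruction* lhs,
                                                    HloInstruction* rhs) {
    DotDimensionNumbers dnums;
    dnums.add_lhs_contracting_dimensions(1);
    dnums.add_rhs_contracting_dimensions(0);
    PrecisionConfig precision;
    precision.mutable_operand_precision()->Resize(2, PrecisionConfig::DEFAULT);
    return MakeDotHlo(lhs, rhs, dnums, precision);
  }
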
diff --git a/tensorflow/compiler/xla/service/hlo_cse.cc b/tensorflow/compiler/xla/service/hlo_cse.cc
index cb367adf5e..b59c9ba3ed 100644
--- a/tensorflow/compiler/xla/service/hlo_cse.cc
+++ b/tensorflow/compiler/xla/service/hlo_cse.cc
@@ -23,6 +23,7 @@ limitations under the License.
#include <utility>
#include <vector>
+#include "absl/container/inlined_vector.h"
#include "tensorflow/compiler/xla/layout_util.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
@@ -34,7 +35,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/flatset.h"
-#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/lib/hash/hash.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/service/hlo_dataflow_analysis_test.cc b/tensorflow/compiler/xla/service/hlo_dataflow_analysis_test.cc
index 62eea2b06c..510d6360a1 100644
--- a/tensorflow/compiler/xla/service/hlo_dataflow_analysis_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_dataflow_analysis_test.cc
@@ -28,6 +28,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/test_helpers.h"
#include "tensorflow/compiler/xla/tests/hlo_test_base.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
@@ -1261,9 +1262,10 @@ TEST_P(HloDataflowAnalysisTest, MultipleEntryParameters_Sequential) {
auto entry = module_->AddEntryComputation(builder.Build());
RunAnalysis(GetParam());
- SequentialHloOrdering::HloModuleSequence sequence;
- sequence.insert({entry, {param0, negate, param1, exp, add}});
- SequentialHloOrdering ordering(module_.get(), sequence);
+ HloSchedule schedule(module_.get());
+ schedule.set_sequence(entry, {param0, negate, param1, exp, add});
+ TF_ASSERT_OK(schedule.Verify());
+ SequentialHloOrdering ordering(schedule);
// Entry parameters interfere as if they are defined simultaneously at
// the very beginning.
@@ -1339,14 +1341,16 @@ TEST_P(HloDataflowAnalysisTest, WhileParameters_Sequential) {
bool ssa_form = GetParam();
RunAnalysis(ssa_form);
- SequentialHloOrdering::HloModuleSequence sequence;
- sequence.insert({entry, {param, xla_while}});
- sequence.insert({condition, {cond_param, cond_constant}});
+ HloSchedule schedule(module_.get());
+ schedule.set_sequence(entry, {param, xla_while});
+ schedule.set_sequence(condition, {cond_param, cond_constant});
// Construct the order such that 'constant' and its use 'exp' are before
// body_param.
- sequence.insert({body, {constant, exp, body_param, add}});
+ schedule.set_sequence(
+ body, {constant, exp, body_param, add, dead_constant, dead_negate});
+ TF_ASSERT_OK(schedule.Verify());
- SequentialHloOrdering ordering(module_.get(), sequence);
+ SequentialHloOrdering ordering(schedule);
// 'add' is live out of the body and will interfere with later instructions
// such as 'dead_constant' and 'dead_negate'.
@@ -1476,11 +1480,10 @@ TEST_P(HloDataflowAnalysisTest, OverlappedValuesSequentialOrder) {
auto entry = module_->AddEntryComputation(builder.Build());
RunAnalysis(GetParam());
- SequentialHloOrdering::HloModuleSequence sequence;
- std::vector<const HloInstruction*> order = {param, negate, exp, add};
- sequence.emplace(entry, order);
-
- SequentialHloOrdering ordering(module_.get(), sequence);
+ HloSchedule schedule(module_.get());
+ schedule.set_sequence(entry, {param, negate, exp, add});
+ TF_ASSERT_OK(schedule.Verify());
+ SequentialHloOrdering ordering(schedule);
EXPECT_TRUE(InstructionsMayInterfere(ordering, param, negate));
EXPECT_FALSE(InstructionsMayInterfere(ordering, param, exp));
@@ -2334,9 +2337,9 @@ TEST_F(CanShareOperandBufferWithUserTest, FusedDotAdd) {
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
- PrecisionConfigProto precision_config;
+ PrecisionConfig precision_config;
precision_config.mutable_operand_precision()->Resize(
- 2, PrecisionConfigProto::DEFAULT);
+ 2, PrecisionConfig::DEFAULT);
auto dot = builder.AddInstruction(
HloInstruction::CreateDot(data_shape, a, b, dot_dnums, precision_config));
diff --git a/tensorflow/compiler/xla/service/hlo_evaluator.cc b/tensorflow/compiler/xla/service/hlo_evaluator.cc
index ffb3451164..d0d955fea8 100644
--- a/tensorflow/compiler/xla/service/hlo_evaluator.cc
+++ b/tensorflow/compiler/xla/service/hlo_evaluator.cc
@@ -345,7 +345,7 @@ StatusOr<std::unique_ptr<Literal>> HloEvaluator::EvaluateElementwiseUnaryOp(
StatusOr<std::unique_ptr<Literal>> HloEvaluator::EvaluateDotOp(
const DotDimensionNumbers& dim_numbers,
- const PrecisionConfigProto& precision_config, const Literal& lhs,
+ const PrecisionConfig& precision_config, const Literal& lhs,
const Literal& rhs) {
std::unique_ptr<HloInstruction> lhs_instr =
HloInstruction::CreateConstant(lhs.CloneToUnique());
diff --git a/tensorflow/compiler/xla/service/hlo_evaluator.h b/tensorflow/compiler/xla/service/hlo_evaluator.h
index e13af8e999..72252bafc7 100644
--- a/tensorflow/compiler/xla/service/hlo_evaluator.h
+++ b/tensorflow/compiler/xla/service/hlo_evaluator.h
@@ -116,7 +116,7 @@ class HloEvaluator : public DfsHloVisitorWithDefault {
StatusOr<std::unique_ptr<Literal>> EvaluateDotOp(
const DotDimensionNumbers& dim_numbers,
- const PrecisionConfigProto& precision_config, const Literal& lhs,
+ const PrecisionConfig& precision_config, const Literal& lhs,
const Literal& rhs);
protected:
diff --git a/tensorflow/compiler/xla/service/hlo_evaluator_test.cc b/tensorflow/compiler/xla/service/hlo_evaluator_test.cc
index f586f253da..abd4bb1f73 100644
--- a/tensorflow/compiler/xla/service/hlo_evaluator_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_evaluator_test.cc
@@ -622,13 +622,6 @@ TEST_P(HloEvaluatorTest, NegativeAndInteriorPadding2D) {
EXPECT_TRUE(LiteralTestUtil::Equal(*expected, *result));
}
-PrecisionConfigProto DefaultPrecisionConfig(int operands) {
- PrecisionConfigProto precision_config;
- precision_config.mutable_operand_precision()->Resize(
- operands, PrecisionConfigProto::DEFAULT);
- return precision_config;
-}
-
TEST_P(HloEvaluatorTest, DotRank2AndRank1) {
HloComputation::Builder b(TestName());
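
The per-file DefaultPrecisionConfig helpers removed above all shared the same body; under the renamed proto the equivalent helper would read as below (a sketch; where the shared replacement is actually declared is outside this hunk):

  PrecisionConfig DefaultPrecisionConfig(int operands) {
    PrecisionConfig precision_config;
    precision_config.mutable_operand_precision()->Resize(
        operands, PrecisionConfig::DEFAULT);
    return precision_config;
  }
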
diff --git a/tensorflow/compiler/xla/service/hlo_instruction.cc b/tensorflow/compiler/xla/service/hlo_instruction.cc
index f25761ac70..471a12d6aa 100644
--- a/tensorflow/compiler/xla/service/hlo_instruction.cc
+++ b/tensorflow/compiler/xla/service/hlo_instruction.cc
@@ -347,9 +347,9 @@ StatusOr<std::unique_ptr<HloInstruction>> HloInstruction::CreateFromProto(
<< proto.operand_ids_size();
TF_RET_CHECK(proto.has_window());
TF_RET_CHECK(proto.has_convolution_dimension_numbers());
- PrecisionConfigProto precision_config = proto.precision_config();
+ PrecisionConfig precision_config = proto.precision_config();
precision_config.mutable_operand_precision()->Resize(
- proto.operand_ids_size(), PrecisionConfigProto::DEFAULT);
+ proto.operand_ids_size(), PrecisionConfig::DEFAULT);
instruction = CreateConvolve(
proto.shape(), operands(0), operands(1),
std::max<int64>(proto.feature_group_count(), 1), proto.window(),
@@ -475,7 +475,7 @@ StatusOr<std::unique_ptr<HloInstruction>> HloInstruction::CreateFromProto(
if (instruction->opcode() == HloOpcode::kDot) {
instruction->precision_config_ = proto.precision_config();
instruction->precision_config_.mutable_operand_precision()->Resize(
- instruction->operand_count(), PrecisionConfigProto::DEFAULT);
+ instruction->operand_count(), PrecisionConfig::DEFAULT);
TF_RET_CHECK(proto.has_dot_dimension_numbers());
instruction->dot_dimension_numbers_ =
absl::make_unique<DotDimensionNumbers>(
@@ -657,7 +657,7 @@ HloInstruction::CreateGetTupleElement(const Shape& shape,
const Shape& shape, HloInstruction* lhs, HloInstruction* rhs,
int64 feature_group_count, const Window& window,
const ConvolutionDimensionNumbers& dimension_numbers,
- const PrecisionConfigProto& precision_config) {
+ const PrecisionConfig& precision_config) {
return absl::make_unique<HloConvolutionInstruction>(
shape, lhs, rhs, feature_group_count, window, dimension_numbers,
precision_config);
@@ -673,7 +673,7 @@ HloInstruction::CreateGetTupleElement(const Shape& shape,
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateDot(
const Shape& shape, HloInstruction* lhs, HloInstruction* rhs,
const DotDimensionNumbers& dimension_numbers,
- const PrecisionConfigProto& precision_config) {
+ const PrecisionConfig& precision_config) {
auto instruction =
absl::WrapUnique(new HloInstruction(HloOpcode::kDot, shape));
instruction->AppendOperand(lhs);
@@ -2888,8 +2888,8 @@ string RandomDistributionToString(const RandomDistribution& distribution) {
return absl::AsciiStrToLower(RandomDistribution_Name(distribution));
}
-string PrecisionToString(const PrecisionConfigProto::Precision& precision) {
- return absl::AsciiStrToLower(PrecisionConfigProto::Precision_Name(precision));
+string PrecisionToString(const PrecisionConfig::Precision& precision) {
+ return absl::AsciiStrToLower(PrecisionConfig::Precision_Name(precision));
}
string ConvolutionDimensionNumbersToString(
@@ -2967,32 +2967,31 @@ StatusOr<RandomDistribution> StringToRandomDistribution(const string& name) {
string HloInstruction::PrecisionConfigToString() const {
if (absl::c_all_of(
precision_config_.operand_precision(), [](int32 precision) {
- return static_cast<PrecisionConfigProto::Precision>(precision) ==
- PrecisionConfigProto::DEFAULT;
+ return static_cast<PrecisionConfig::Precision>(precision) ==
+ PrecisionConfig::DEFAULT;
})) {
return "";
}
return StrCat(
"operand_precision={",
- StrJoin(precision_config_.operand_precision(), ",",
- [](string* out, int32 precision) {
- CHECK(PrecisionConfigProto::Precision_IsValid(precision))
- << precision;
- StrAppend(out, PrecisionToString(
- static_cast<PrecisionConfigProto::Precision>(
- precision)));
- }),
+ StrJoin(
+ precision_config_.operand_precision(), ",",
+ [](string* out, int32 precision) {
+ CHECK(PrecisionConfig::Precision_IsValid(precision)) << precision;
+ StrAppend(out,
+ PrecisionToString(
+ static_cast<PrecisionConfig::Precision>(precision)));
+ }),
"}");
}
-StatusOr<PrecisionConfigProto::Precision> StringToPrecision(
- const string& name) {
- static std::unordered_map<string, PrecisionConfigProto::Precision>* map = [] {
+StatusOr<PrecisionConfig::Precision> StringToPrecision(const string& name) {
+ static std::unordered_map<string, PrecisionConfig::Precision>* map = [] {
static auto* map =
- new std::unordered_map<string, PrecisionConfigProto::Precision>;
- for (int i = 0; i < PrecisionConfigProto::Precision_ARRAYSIZE; i++) {
- if (PrecisionConfigProto::Precision_IsValid(i)) {
- auto value = static_cast<PrecisionConfigProto::Precision>(i);
+ new std::unordered_map<string, PrecisionConfig::Precision>;
+ for (int i = 0; i < PrecisionConfig::Precision_ARRAYSIZE; i++) {
+ if (PrecisionConfig::Precision_IsValid(i)) {
+ auto value = static_cast<PrecisionConfig::Precision>(i);
(*map)[PrecisionToString(value)] = value;
}
}
diff --git a/tensorflow/compiler/xla/service/hlo_instruction.h b/tensorflow/compiler/xla/service/hlo_instruction.h
index 55d592ff94..691f8155f9 100644
--- a/tensorflow/compiler/xla/service/hlo_instruction.h
+++ b/tensorflow/compiler/xla/service/hlo_instruction.h
@@ -407,7 +407,7 @@ class HloInstruction {
const Shape& shape, HloInstruction* lhs, HloInstruction* rhs,
int64 feature_group_count, const Window& window,
const ConvolutionDimensionNumbers& dimension_numbers,
- const PrecisionConfigProto& precision_config);
+ const PrecisionConfig& precision_config);
// Creates an FFT op, of the type indicated by fft_type.
static std::unique_ptr<HloInstruction> CreateFft(
@@ -419,7 +419,7 @@ class HloInstruction {
static std::unique_ptr<HloInstruction> CreateDot(
const Shape& shape, HloInstruction* lhs, HloInstruction* rhs,
const DotDimensionNumbers& dimension_numbers,
- const PrecisionConfigProto& precision_config);
+ const PrecisionConfig& precision_config);
// Creates a dot op with operands 'lhs' and 'rhs' that contracts dimension 1
// of the LHS with dimension 0 of the RHS with no batch dimensions. Both LHS
@@ -1262,10 +1262,8 @@ class HloInstruction {
// information. Transformations to other HLOs will not preserve this
// information but it is presumed that the alternate lowering is strictly
// superior.
- const PrecisionConfigProto& precision_config() const {
- return precision_config_;
- }
- void set_precision_config(const PrecisionConfigProto& precision_config) {
+ const PrecisionConfig& precision_config() const { return precision_config_; }
+ void set_precision_config(const PrecisionConfig& precision_config) {
precision_config_ = precision_config;
}
@@ -1680,7 +1678,7 @@ class HloInstruction {
// Information used to communicate to the implementation about the algorithm
// used to produce results. See the documentation on precision_config().
- PrecisionConfigProto precision_config_;
+ PrecisionConfig precision_config_;
// String identifier for instruction.
string name_;
@@ -1704,12 +1702,12 @@ StatusOr<HloInstruction::FusionKind> StringToFusionKind(
string PaddingConfigToString(const PaddingConfig& padding);
string OpMetadataToString(const OpMetadata& metadata);
string RandomDistributionToString(const RandomDistribution& distribution);
-string PrecisionToString(const PrecisionConfigProto::Precision& precision);
+string PrecisionToString(const PrecisionConfig::Precision& precision);
string ConvolutionDimensionNumbersToString(
const ConvolutionDimensionNumbers& dnums);
StatusOr<RandomDistribution> StringToRandomDistribution(const string& name);
-StatusOr<PrecisionConfigProto::Precision> StringToPrecision(const string& name);
+StatusOr<PrecisionConfig::Precision> StringToPrecision(const string& name);
std::ostream& operator<<(std::ostream& os, HloInstruction::FusionKind kind);
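
With the accessor pair above, callers read and override an instruction's operand precision through the renamed proto. A minimal sketch; the `dot` pointer and the helper name are placeholders for code built elsewhere:

  void RaiseFirstOperandPrecision(HloInstruction* dot) {
    PrecisionConfig config = dot->precision_config();
    config.set_operand_precision(0, PrecisionConfig::HIGH);
    dot->set_precision_config(config);
  }
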
diff --git a/tensorflow/compiler/xla/service/hlo_instruction_test.cc b/tensorflow/compiler/xla/service/hlo_instruction_test.cc
index b4e302e832..c1b7c3832b 100644
--- a/tensorflow/compiler/xla/service/hlo_instruction_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_instruction_test.cc
@@ -1122,13 +1122,6 @@ TEST_F(HloInstructionTest, PartiallyElementwiseWithReuse) {
}
}
-PrecisionConfigProto DefaultPrecisionConfig(int operands) {
- PrecisionConfigProto precision_config;
- precision_config.mutable_operand_precision()->Resize(
- operands, PrecisionConfigProto::DEFAULT);
- return precision_config;
-}
-
TEST_F(HloInstructionTest, CloneOfFusionPreservesShape) {
// Fused expression:
//
@@ -1759,9 +1752,9 @@ TEST_F(HloInstructionTest, PreserveOperandPrecisionOnCloneConv) {
auto* conv = module->entry_computation()->root_instruction();
auto clone = conv->Clone();
- EXPECT_THAT(clone->precision_config().operand_precision(),
- ::testing::ElementsAre(PrecisionConfigProto::HIGH,
- PrecisionConfigProto::DEFAULT));
+ EXPECT_THAT(
+ clone->precision_config().operand_precision(),
+ ::testing::ElementsAre(PrecisionConfig::HIGH, PrecisionConfig::DEFAULT));
}
} // namespace
diff --git a/tensorflow/compiler/xla/service/hlo_instructions.cc b/tensorflow/compiler/xla/service/hlo_instructions.cc
index e3683aaec9..ad87aa1123 100644
--- a/tensorflow/compiler/xla/service/hlo_instructions.cc
+++ b/tensorflow/compiler/xla/service/hlo_instructions.cc
@@ -1630,7 +1630,7 @@ HloConvolutionInstruction::HloConvolutionInstruction(
const Shape& shape, HloInstruction* lhs, HloInstruction* rhs,
int64 feature_group_count, const Window& window,
const ConvolutionDimensionNumbers& dimension_numbers,
- const PrecisionConfigProto& precision_config)
+ const PrecisionConfig& precision_config)
: HloInstruction(HloOpcode::kConvolution, shape),
feature_group_count_(feature_group_count),
window_(window),
diff --git a/tensorflow/compiler/xla/service/hlo_instructions.h b/tensorflow/compiler/xla/service/hlo_instructions.h
index 1c85aa4681..e1215a7566 100644
--- a/tensorflow/compiler/xla/service/hlo_instructions.h
+++ b/tensorflow/compiler/xla/service/hlo_instructions.h
@@ -944,7 +944,7 @@ class HloConvolutionInstruction : public HloInstruction {
const Shape& shape, HloInstruction* lhs, HloInstruction* rhs,
int64 feature_group_count, const Window& window,
const ConvolutionDimensionNumbers& dimension_numbers,
- const PrecisionConfigProto& precision_config);
+ const PrecisionConfig& precision_config);
const Window& window() const override { return window_; }
void set_window(const Window& window) override { window_ = window; }
const ConvolutionDimensionNumbers& convolution_dimension_numbers() const {
diff --git a/tensorflow/compiler/xla/service/hlo_ordering.cc b/tensorflow/compiler/xla/service/hlo_ordering.cc
index 0581d5c404..2105f7a349 100644
--- a/tensorflow/compiler/xla/service/hlo_ordering.cc
+++ b/tensorflow/compiler/xla/service/hlo_ordering.cc
@@ -18,6 +18,7 @@ limitations under the License.
#include <utility>
#include <vector>
+#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
@@ -252,6 +253,12 @@ bool HloOrdering::LiveRangeStrictlyBefore(
VLOG(4) << a << " not defined before " << b;
return false;
}
+
+ if (a.live_out_of_module()) {
+ VLOG(4) << a << " is live out of module and defined before " << b;
+ return false;
+ }
+
// All uses of 'a' must be before 'b' is defined.
for (const HloUse& use : a.uses()) {
if (dataflow.DoesNotUseOperandBuffer(a.instruction(), a.index(),
@@ -264,6 +271,18 @@ bool HloOrdering::LiveRangeStrictlyBefore(
return false;
}
}
+
+ if (a.instruction()->parent() == b.instruction()->parent()) {
+ for (const HloPosition& position : a.positions()) {
+ if (position.instruction ==
+ a.instruction()->parent()->root_instruction()) {
+ VLOG(4) << a << " is live out of computation and defined before " << b
+ << " which is in same computation";
+ return false;
+ }
+ }
+ }
+
return true;
}
@@ -336,15 +355,24 @@ string DependencyHloOrdering::ToString() const {
return ToStringHelper("DependencyHloOrdering");
}
-SequentialHloOrdering::SequentialHloOrdering(
- const HloModule* module, const HloModuleSequence& module_sequence)
- : HloOrdering(module), module_sequence_(module_sequence) {
+SequentialHloOrdering::SequentialHloOrdering(const HloSchedule& schedule)
+ : HloOrdering(schedule.module()), schedule_(schedule) {
+ Initialize();
+}
+
+SequentialHloOrdering::SequentialHloOrdering(HloSchedule&& schedule)
+ : HloOrdering(schedule.module()), schedule_(std::move(schedule)) {
+ Initialize();
+}
+
+void SequentialHloOrdering::Initialize() {
// Create a map from instruction to its order position.
- for (auto computation_order : module_sequence_) {
- const std::vector<const HloInstruction*>& order = computation_order.second;
+ TF_DCHECK_OK(schedule_.Verify());
+ for (const auto& computation_sequence : schedule_.sequences()) {
+ const std::vector<const HloInstruction*>& order =
+ computation_sequence.second.instructions();
for (int i = 0; i < order.size(); ++i) {
- DCHECK_EQ(0, order_position_.count(order[i]));
- order_position_.emplace(order[i], i);
+ InsertOrDie(&order_position_, order[i], i);
}
}
}
@@ -362,49 +390,13 @@ bool SequentialHloOrdering::ExecutesBeforeInSameComputation(
const std::vector<const HloInstruction*>*
SequentialHloOrdering::SequentialOrder(
const HloComputation& computation) const {
- auto find_it = module_sequence_.find(&computation);
- return find_it == module_sequence_.end() ? nullptr : &find_it->second;
+ return schedule_.is_computation_scheduled(&computation)
+ ? &schedule_.sequence(&computation).instructions()
+ : nullptr;
}
string SequentialHloOrdering::ToString() const {
- std::vector<string> pieces;
- pieces.push_back("SequentialHloOrdering");
- for (auto* computation : module_->computations()) {
- pieces.push_back(
- absl::StrFormat("computation %s order:", computation->name()));
- // Gather all instructions in the module sequence for this computation and
- // sort them by their position.
- std::vector<const HloInstruction*> instructions;
- for (auto& instruction_position : order_position_) {
- const HloInstruction* instruction = instruction_position.first;
- if (instruction->parent() == computation) {
- instructions.push_back(instruction);
- }
- }
- std::sort(instructions.begin(), instructions.end(),
- [this](const HloInstruction* a, const HloInstruction* b) {
- return order_position_.at(a) < order_position_.at(b);
- });
- for (auto instruction : instructions) {
- pieces.push_back(absl::StrFormat(" %s", instruction->name()));
- }
- }
- return absl::StrJoin(pieces, "\n");
-}
-
-std::ostream& operator<<(
- std::ostream& out,
- const SequentialHloOrdering::HloModuleSequence& module_sequence) {
- for (auto computation_pair : module_sequence) {
- const HloComputation* computation = computation_pair.first;
- const std::vector<const HloInstruction*>& computation_sequence =
- computation_pair.second;
- out << "Computation " << computation->name() << ":\n";
- for (auto* instruction : computation_sequence) {
- out << " " << instruction->name() << "\n";
- }
- }
- return out;
+ return absl::StrCat("SequentialHloOrdering\n", schedule_.ToString());
}
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/hlo_ordering.h b/tensorflow/compiler/xla/service/hlo_ordering.h
index 985f3fa64d..b21071c4b2 100644
--- a/tensorflow/compiler/xla/service/hlo_ordering.h
+++ b/tensorflow/compiler/xla/service/hlo_ordering.h
@@ -25,6 +25,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/hlo_dataflow_analysis.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_module.h"
+#include "tensorflow/compiler/xla/service/hlo_schedule.h"
#include "tensorflow/compiler/xla/service/hlo_value.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/core/lib/gtl/flatmap.h"
@@ -183,17 +184,8 @@ class DependencyHloOrdering : public PredecessorHloOrdering {
// interference is reduced relative to DependencyHloOrdering.
class SequentialHloOrdering : public HloOrdering {
public:
- // TODO(dimvar): HloModuleSequence is not a good name because it sounds like
- // a sequence of modules, instead of a map of schedules for all computations
- // in a module. We should change it at some point.
- //
- // A sequence of instructions for each computation in the module.
- using HloModuleSequence =
- tensorflow::gtl::FlatMap<const HloComputation*,
- std::vector<const HloInstruction*>>;
-
- SequentialHloOrdering(const HloModule* module,
- const HloModuleSequence& module_sequence);
+ SequentialHloOrdering(const HloSchedule& schedule);
+ SequentialHloOrdering(HloSchedule&& schedule);
~SequentialHloOrdering() override = default;
// Returns the sequential instruction order for the given computation.
@@ -203,10 +195,12 @@ class SequentialHloOrdering : public HloOrdering {
string ToString() const override;
protected:
+ void Initialize();
+
bool ExecutesBeforeInSameComputation(const HloInstruction* a,
const HloInstruction* b) const override;
- const HloModuleSequence module_sequence_;
+ const HloSchedule schedule_;
// The position of every instruction in the HLO module in its respective
// computation sequence (a value of zero indicates the instruction is first in
@@ -217,10 +211,6 @@ class SequentialHloOrdering : public HloOrdering {
tensorflow::gtl::FlatMap<const HloInstruction*, int> order_position_;
};
-std::ostream& operator<<(
- std::ostream& out,
- const SequentialHloOrdering::HloModuleSequence& module_sequence);
-
} // namespace xla
#endif // TENSORFLOW_COMPILER_XLA_SERVICE_HLO_ORDERING_H_
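
Construction sites therefore move from the (module, HloModuleSequence) pair to a verified HloSchedule, which the ordering either copies or takes by move. A minimal sketch; the computation and instruction pointers are placeholders supplied by the caller:

  // Assumed headers: hlo_schedule.h, hlo_ordering.h.
  void QuerySequentialOrder(HloModule* module, HloComputation* entry,
                            HloInstruction* param, HloInstruction* root) {
    HloSchedule schedule(module);
    schedule.set_sequence(entry, {param, root});
    TF_CHECK_OK(schedule.Verify());
    SequentialHloOrdering ordering(std::move(schedule));
    // Non-null because `entry` has a sequence in the schedule.
    const std::vector<const HloInstruction*>* order =
        ordering.SequentialOrder(*entry);
    CHECK(order != nullptr);
    CHECK(ordering.ExecutesBefore(param, root));
  }
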
diff --git a/tensorflow/compiler/xla/service/hlo_ordering_test.cc b/tensorflow/compiler/xla/service/hlo_ordering_test.cc
index 126d3a2d9c..6b6005e7a5 100644
--- a/tensorflow/compiler/xla/service/hlo_ordering_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_ordering_test.cc
@@ -23,11 +23,13 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
#include "tensorflow/compiler/xla/service/hlo_parser.h"
+#include "tensorflow/compiler/xla/service/hlo_schedule.h"
#include "tensorflow/compiler/xla/service/hlo_scheduling.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/tests/hlo_test_base.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/lib/core/status_test_util.h"
namespace xla {
namespace {
@@ -376,5 +378,104 @@ ENTRY root {
dataflow->GetValueDefinedAt(add_3)));
}
+TEST_F(HloOrderingTest,
+ ValuesLiveOutOfModuleInterfereWithInstructionsAfterRoot) {
+ // Tests that values live out of the module should interfere with values
+ // defined after the root instruction. That is:
+ //
+ // %param = param(0)
+ // ROOT %root = negate(%param)
+ // %dead = Constant(123.0)
+ //
+ // %root should interfere with %dead.
+ auto module = CreateNewModule();
+ const Shape scalar_shape = ShapeUtil::MakeShape(xla::F32, {});
+
+ auto builder = HloComputation::Builder(TestName());
+ HloInstruction* param = builder.AddInstruction(
+ HloInstruction::CreateParameter(0, scalar_shape, "param"));
+ HloInstruction* root = builder.AddInstruction(
+ HloInstruction::CreateUnary(scalar_shape, HloOpcode::kNegate, param));
+ HloInstruction* dead = builder.AddInstruction(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(123.0f)));
+ HloComputation* entry =
+ module->AddEntryComputation(builder.Build(/*root_instruction=*/root));
+
+ HloSchedule schedule(module.get());
+ schedule.set_sequence(entry, {param, root, dead});
+ TF_ASSERT_OK(schedule.Verify());
+ SequentialHloOrdering ordering(schedule);
+
+ TF_ASSERT_OK_AND_ASSIGN(auto dataflow,
+ HloDataflowAnalysis::Run(*module, /*ssa_form=*/true));
+
+ EXPECT_TRUE(ordering.ExecutesBefore(root, dead));
+ EXPECT_FALSE(ordering.ExecutesBefore(dead, root));
+
+ EXPECT_FALSE(ordering.LiveRangeStrictlyBefore(
+ dataflow->GetValueDefinedAt(root), dataflow->GetValueDefinedAt(dead),
+ *dataflow));
+
+ EXPECT_TRUE(ordering.MayInterfere(dataflow->GetValueDefinedAt(root),
+ dataflow->GetValueDefinedAt(dead),
+ *dataflow));
+}
+
+TEST_F(HloOrderingTest,
+ ValuesLiveOutOfComputationInterfereWithInstructionsAfterRoot) {
+ // Tests that values live out of a computation should interfere with values
+ // defined after the root instruction of the computation. That is:
+ //
+ // subcomputation:
+ // %param = param(0)
+ // ROOT %root = negate(%param)
+ // %dead = Constant(123.0)
+ //
+ // entry computation:
+ // %c = constant(42.0)
+ // ROOT %call = call({%c}), subcomputation
+ //
+ // %root should interfere with %dead.
+ auto module = CreateNewModule();
+ const Shape scalar_shape = ShapeUtil::MakeShape(xla::F32, {});
+
+ auto subbuilder = HloComputation::Builder(TestName() + ".sub");
+ HloInstruction* param = subbuilder.AddInstruction(
+ HloInstruction::CreateParameter(0, scalar_shape, "param"));
+ HloInstruction* root = subbuilder.AddInstruction(
+ HloInstruction::CreateUnary(scalar_shape, HloOpcode::kNegate, param));
+ HloInstruction* dead = subbuilder.AddInstruction(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(123.0f)));
+ HloComputation* subcomputation = module->AddEmbeddedComputation(
+ subbuilder.Build(/*root_instruction=*/root));
+
+ auto builder = HloComputation::Builder(TestName());
+ HloInstruction* c = builder.AddInstruction(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
+ HloInstruction* call = builder.AddInstruction(
+ HloInstruction::CreateCall(scalar_shape, {c}, subcomputation));
+ HloComputation* entry = module->AddEntryComputation(builder.Build());
+
+ HloSchedule schedule(module.get());
+ schedule.set_sequence(subcomputation, {param, root, dead});
+ schedule.set_sequence(entry, {c, call});
+ TF_ASSERT_OK(schedule.Verify());
+ SequentialHloOrdering ordering(schedule);
+
+ TF_ASSERT_OK_AND_ASSIGN(auto dataflow,
+ HloDataflowAnalysis::Run(*module, /*ssa_form=*/true));
+
+ EXPECT_TRUE(ordering.ExecutesBefore(root, dead));
+ EXPECT_FALSE(ordering.ExecutesBefore(dead, root));
+
+ EXPECT_FALSE(ordering.LiveRangeStrictlyBefore(
+ dataflow->GetValueDefinedAt(root), dataflow->GetValueDefinedAt(dead),
+ *dataflow));
+
+ EXPECT_TRUE(ordering.MayInterfere(dataflow->GetValueDefinedAt(root),
+ dataflow->GetValueDefinedAt(dead),
+ *dataflow));
+}
+
} // namespace
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/hlo_parser.cc b/tensorflow/compiler/xla/service/hlo_parser.cc
index 62f01c4adb..0f26ed4235 100644
--- a/tensorflow/compiler/xla/service/hlo_parser.cc
+++ b/tensorflow/compiler/xla/service/hlo_parser.cc
@@ -221,7 +221,7 @@ class HloParser {
bool ParseWindowPad(std::vector<std::vector<tensorflow::int64>>* pad);
bool ParseSliceRanges(SliceRanges* result);
- bool ParsePrecisionList(std::vector<PrecisionConfigProto::Precision>* result);
+ bool ParsePrecisionList(std::vector<PrecisionConfig::Precision>* result);
bool ParseInt64List(const TokKind start, const TokKind end,
const TokKind delim,
std::vector<tensorflow::int64>* result);
@@ -240,7 +240,7 @@ class HloParser {
bool ParseFftType(FftType* result);
bool ParseFusionKind(HloInstruction::FusionKind* result);
bool ParseRandomDistribution(RandomDistribution* result);
- bool ParsePrecision(PrecisionConfigProto::Precision* result);
+ bool ParsePrecision(PrecisionConfig::Precision* result);
bool ParseInt64(tensorflow::int64* result);
bool ParseDouble(double* result);
bool ParseBool(bool* result);
@@ -909,7 +909,7 @@ bool HloParser::ParseInstruction(HloComputation::Builder* builder,
AttrTy::kConvolutionDimensionNumbers, &dnums};
attrs["feature_group_count"] = {/*required=*/false, AttrTy::kInt64,
&feature_group_count};
- optional<std::vector<PrecisionConfigProto::Precision>> operand_precision;
+ optional<std::vector<PrecisionConfig::Precision>> operand_precision;
attrs["operand_precision"] = {/*required=*/false, AttrTy::kPrecisionList,
&operand_precision};
if (!ParseOperands(&operands, /*expected_size=*/2) ||
@@ -922,13 +922,13 @@ bool HloParser::ParseInstruction(HloComputation::Builder* builder,
if (!feature_group_count) {
feature_group_count = 1;
}
- PrecisionConfigProto precision_config;
+ PrecisionConfig precision_config;
if (operand_precision) {
*precision_config.mutable_operand_precision() = {
operand_precision->begin(), operand_precision->end()};
} else {
precision_config.mutable_operand_precision()->Resize(
- operands.size(), PrecisionConfigProto::DEFAULT);
+ operands.size(), PrecisionConfig::DEFAULT);
}
instruction = builder->AddInstruction(HloInstruction::CreateConvolve(
shape, /*lhs=*/operands[0], /*rhs=*/operands[1],
@@ -1279,7 +1279,7 @@ bool HloParser::ParseInstruction(HloComputation::Builder* builder,
optional<std::vector<tensorflow::int64>> rhs_batch_dims;
attrs["rhs_batch_dims"] = {/*required=*/false, AttrTy::kBracedInt64List,
&rhs_batch_dims};
- optional<std::vector<PrecisionConfigProto::Precision>> operand_precision;
+ optional<std::vector<PrecisionConfig::Precision>> operand_precision;
attrs["operand_precision"] = {/*required=*/false, AttrTy::kPrecisionList,
&operand_precision};
@@ -1306,13 +1306,13 @@ bool HloParser::ParseInstruction(HloComputation::Builder* builder,
rhs_batch_dims->end()};
}
- PrecisionConfigProto precision_config;
+ PrecisionConfig precision_config;
if (operand_precision) {
*precision_config.mutable_operand_precision() = {
operand_precision->begin(), operand_precision->end()};
} else {
precision_config.mutable_operand_precision()->Resize(
- operands.size(), PrecisionConfigProto::DEFAULT);
+ operands.size(), PrecisionConfig::DEFAULT);
}
instruction = builder->AddInstruction(HloInstruction::CreateDot(
@@ -2410,11 +2410,11 @@ bool HloParser::ParseAttributeHelper(
return ParseDomain(static_cast<DomainData*>(attr_out_ptr));
}
case AttrTy::kPrecisionList: {
- std::vector<PrecisionConfigProto::Precision> result;
+ std::vector<PrecisionConfig::Precision> result;
if (!ParsePrecisionList(&result)) {
return false;
}
- static_cast<optional<std::vector<PrecisionConfigProto::Precision>>*>(
+ static_cast<optional<std::vector<PrecisionConfig::Precision>>*>(
attr_out_ptr)
->emplace(result);
return true;
@@ -2698,9 +2698,9 @@ bool HloParser::ParseSliceRanges(SliceRanges* result) {
// ::= /*empty*/
// ::= precision_val (delim precision_val)*
bool HloParser::ParsePrecisionList(
- std::vector<PrecisionConfigProto::Precision>* result) {
+ std::vector<PrecisionConfig::Precision>* result) {
auto parse_and_add_item = [&]() {
- PrecisionConfigProto::Precision item;
+ PrecisionConfig::Precision item;
if (!ParsePrecision(&item)) {
return false;
}
@@ -3032,7 +3032,7 @@ bool HloParser::ParseRandomDistribution(RandomDistribution* result) {
return true;
}
-bool HloParser::ParsePrecision(PrecisionConfigProto::Precision* result) {
+bool HloParser::ParsePrecision(PrecisionConfig::Precision* result) {
VLOG(1) << "ParsePrecision";
if (lexer_.GetKind() != TokKind::kIdent) {
return TokenError("expects precision");
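
In the textual HLO form these code paths handle, the attribute is operand_precision with lowercase enum names. A hedged parsing sketch; the ParseHloString entry point, the module text, and the shapes are assumptions of the example:

  // Assumed header: hlo_parser.h.
  StatusOr<std::unique_ptr<HloModule>> ParseDotWithPrecision() {
    const string hlo_text = R"(
  HloModule DotPrecision

  ENTRY dot_computation {
    x = f32[2,4]{1,0} parameter(0)
    y = f32[4,2]{1,0} parameter(1)
    ROOT d = f32[2,2]{1,0} dot(x, y), lhs_contracting_dims={1},
        rhs_contracting_dims={0}, operand_precision={high,default}
  }
  )";
    return ParseHloString(hlo_text);
  }
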
diff --git a/tensorflow/compiler/xla/service/hlo_rematerialization.cc b/tensorflow/compiler/xla/service/hlo_rematerialization.cc
index c9629926ea..0a0a6a323e 100644
--- a/tensorflow/compiler/xla/service/hlo_rematerialization.cc
+++ b/tensorflow/compiler/xla/service/hlo_rematerialization.cc
@@ -962,8 +962,7 @@ StatusOr<int64> HloRematerialization::CalledComputationsMemoryUsage(
}
StatusOr<bool> HloRematerialization::RematerializeComputation(
- HloComputation* computation,
- SequentialHloOrdering::HloModuleSequence* sequence,
+ HloComputation* computation, HloSchedule* schedule,
int64 memory_limit_bytes) {
VLOG(1) << "Rematerializing computation " << computation->name()
<< " with limit " << HumanReadableNumBytes(memory_limit_bytes);
@@ -971,7 +970,8 @@ StatusOr<bool> HloRematerialization::RematerializeComputation(
<< HumanReadableNumBytes(computation_peak_memory_.at(computation));
CHECK(!ContainsKey(rematerialized_computations_, computation));
- InstructionList instruction_list(sequence->at(computation));
+ InstructionList instruction_list(
+ schedule->sequence(computation).instructions());
MemoryUsageTracker memory_tracker(computation, size_function_,
*points_to_analysis_, instruction_list);
bool changed = false;
@@ -1145,7 +1145,7 @@ StatusOr<bool> HloRematerialization::RematerializeComputation(
0, memory_limit_bytes - memory_tracker.memory_usage());
TF_ASSIGN_OR_RETURN(
bool subcomputation_changed,
- RematerializeComputation(called_computation, sequence,
+ RematerializeComputation(called_computation, schedule,
subcomputation_memory_limit_bytes));
changed |= subcomputation_changed;
}
@@ -1179,12 +1179,12 @@ StatusOr<bool> HloRematerialization::RematerializeComputation(
computation_peak_memory_.at(computation) = peak_memory;
// Update order to include rematerialized instructions.
- auto& dst = sequence->at(computation);
- dst.clear();
+ HloInstructionSequence& sequence = schedule->GetOrCreateSequence(computation);
+ sequence.clear();
for (auto* item = instruction_list.first(); item != nullptr;
item = instruction_list.next(item)) {
const HloInstruction* instruction = item->instruction;
- dst.push_back(instruction);
+ sequence.push_back(instruction);
}
rematerialized_computations_.insert(computation);
@@ -1194,20 +1194,21 @@ StatusOr<bool> HloRematerialization::RematerializeComputation(
return changed;
}
-StatusOr<bool> HloRematerialization::Run(
- HloModule* module, SequentialHloOrdering::HloModuleSequence* sequence,
- int64 memory_limit_bytes, RematerializationSizes* sizes,
- CopyInsertion* copy_insertion) {
- // The sequence is constructed entirely by this method.
- TF_RET_CHECK(sequence->empty());
+StatusOr<bool> HloRematerialization::Run(HloModule* module,
+ HloSchedule* schedule,
+ int64 memory_limit_bytes,
+ RematerializationSizes* sizes,
+ CopyInsertion* copy_insertion) {
+ // The schedule is constructed entirely by this method.
+ TF_RET_CHECK(schedule->empty());
VLOG(1) << "HloRematerialization() with memory limit of "
<< HumanReadableNumBytes(memory_limit_bytes);
XLA_VLOG_LINES(3, "Before HloRematerialization:\n" + module->ToString());
- // Create initial sequence of HLO instructions.
- TF_ASSIGN_OR_RETURN(*sequence, ScheduleComputationsInModule(
- *module,
+ // Create initial schedule of HLO instructions.
+ TF_ASSIGN_OR_RETURN(*schedule,
+ ScheduleModule(*module,
[this](const BufferValue& buffer) {
return size_function_(buffer.shape());
},
@@ -1217,16 +1218,7 @@ StatusOr<bool> HloRematerialization::Run(
// ordering from the HLO schedule allows for more copies to be eliminated.
// TODO(b/80249101): Instead of a separate copy elision pass, use the
// ordering from the HLO schedule directly for copy insertion.
-
- // First create a copy of the schedule which contains HloInstruction unique
- // ids instead of HloInstruction*. This is necessary for updating the
- // schedule below.
- // TODO(b/113175018): Remove this when the HLO schedule is self-contained
- // and can update itself.
- tensorflow::gtl::FlatMap<const HloComputation*, std::vector<int>>
- id_sequence = ComputeIdSchedule(*sequence);
-
- SequentialHloOrdering ordering(module, *sequence);
+ SequentialHloOrdering ordering(*schedule);
TF_RETURN_IF_ERROR(
copy_insertion->RemoveUnnecessaryCopies(ordering, module));
@@ -1241,10 +1233,10 @@ StatusOr<bool> HloRematerialization::Run(
// The passes above can add and remove copies, update the schedule to
// account for these transformations. Newly added instructions will be
// placed ASAP in the schedule.
- TF_RETURN_IF_ERROR(UpdateSchedule(*module, id_sequence, sequence));
+ TF_RETURN_IF_ERROR(schedule->Update());
TF_DCHECK_OK(copy_insertion->VerifyNoLiveRangeInterference(
- SequentialHloOrdering(module, *sequence), module));
+ SequentialHloOrdering(*schedule), module));
}
TF_ASSIGN_OR_RETURN(points_to_analysis_, TuplePointsToAnalysis::Run(module));
@@ -1271,12 +1263,13 @@ StatusOr<bool> HloRematerialization::Run(
// sequential context.
call_graph_ = CallGraph::Build(module);
TF_RETURN_IF_ERROR(call_graph_->VisitNodes(
- [this, sequence](const CallGraphNode& node) -> Status {
+ [this, schedule](const CallGraphNode& node) -> Status {
if (node.context() == CallContext::kSequential) {
TF_ASSIGN_OR_RETURN(
computation_peak_memory_[node.computation()],
- ComputePeakMemory(node.computation(),
- sequence->at(node.computation())));
+ ComputePeakMemory(
+ node.computation(),
+ schedule->sequence(node.computation()).instructions()));
}
return Status::OK();
},
@@ -1295,7 +1288,7 @@ StatusOr<bool> HloRematerialization::Run(
// Subcomputations called by the entry computation will also be
// rematerialized.
TF_ASSIGN_OR_RETURN(bool changed, RematerializeComputation(
- module->entry_computation(), sequence,
+ module->entry_computation(), schedule,
adjusted_memory_limit_bytes));
// Rematerialization can introduce dead code. This occurs if all uses of an
@@ -1305,30 +1298,7 @@ StatusOr<bool> HloRematerialization::Run(
// After DCE, the module sequence may include instructions which no longer
// exist.
- for (const auto* computation : module->MakeNonfusionComputations()) {
- if (sequence->at(computation).size() != computation->instruction_count()) {
- // A size mismatch between the computation instruction count and the size
- // of the ordering of instructions can only be caused by DCE. Rebuild the
- // order by removing the deleted instructions from the order.
- tensorflow::gtl::FlatSet<const HloInstruction*> instruction_set;
- for (const auto& instruction : computation->instructions()) {
- instruction_set.insert(instruction);
- }
- // Move the old order into a temporary vector, then build new order
- // inplace.
- std::vector<const HloInstruction*>& order = sequence->at(computation);
- std::vector<const HloInstruction*> old_order;
- using std::swap;
- swap(order, old_order);
- std::copy_if(old_order.begin(), old_order.end(),
- std::back_inserter(order),
- [&instruction_set](const HloInstruction* instruction) {
- return ContainsKey(instruction_set, instruction);
- });
- TF_RET_CHECK(sequence->at(computation).size() ==
- computation->instruction_count());
- }
- }
+ TF_RETURN_IF_ERROR(schedule->Update());
VLOG(1) << "Rematerialized " << instructions_rematerialized_
<< " instructions in module " << module->name() << "; "
<< net_instructions_added_ << " net instructions added";
@@ -1366,11 +1336,10 @@ StatusOr<bool> HloRematerialization::Run(
/* static */ StatusOr<bool> HloRematerialization::RematerializeAndSchedule(
const HloRematerialization::ShapeSizeFunction& size_function,
int64 memory_limit_bytes, HloModule* hlo_module,
- MemorySchedulerAlgorithm scheduler_algorithm,
- SequentialHloOrdering::HloModuleSequence* sequence,
+ MemorySchedulerAlgorithm scheduler_algorithm, HloSchedule* schedule,
RematerializationSizes* sizes, CopyInsertion* copy_insertion) {
HloRematerialization remat(scheduler_algorithm, size_function);
- return remat.Run(hlo_module, sequence, memory_limit_bytes, sizes,
+ return remat.Run(hlo_module, schedule, memory_limit_bytes, sizes,
copy_insertion);
}
diff --git a/tensorflow/compiler/xla/service/hlo_rematerialization.h b/tensorflow/compiler/xla/service/hlo_rematerialization.h
index 2ec004350a..fa0414b472 100644
--- a/tensorflow/compiler/xla/service/hlo_rematerialization.h
+++ b/tensorflow/compiler/xla/service/hlo_rematerialization.h
@@ -21,6 +21,7 @@
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_module.h"
+#include "tensorflow/compiler/xla/service/hlo_schedule.h"
#include "tensorflow/compiler/xla/service/hlo_scheduling.h"
#include "tensorflow/compiler/xla/service/tuple_points_to_analysis.h"
@@ -50,7 +51,7 @@ class HloRematerialization {
//
// hlo_module: HLO module to rematerialize instructions in.
//
- // sequence: Should point to an empty HloModuleSequence. Upon return
+ // schedule: Should point to an empty HloSchedule. Upon return
// contains the HLO instruction order which was used for
// rematerialization. This is the order in which HLO instructions should
// be emitted to minimize memory use.
@@ -75,8 +76,8 @@ class HloRematerialization {
static StatusOr<bool> RematerializeAndSchedule(
const ShapeSizeFunction& size_function, int64 memory_limit_bytes,
HloModule* hlo_module, MemorySchedulerAlgorithm scheduler_algorithm,
- SequentialHloOrdering::HloModuleSequence* sequence,
- RematerializationSizes* sizes, CopyInsertion* copy_insertion = nullptr);
+ HloSchedule* schedule, RematerializationSizes* sizes,
+ CopyInsertion* copy_insertion = nullptr);
protected:
HloRematerialization(MemorySchedulerAlgorithm scheduler_algorithm,
@@ -87,10 +88,9 @@ class HloRematerialization {
// Runs rematerialization on the given module. Returns whether the module was
// changed. memory_limit is the target maximum peak memory usage by the
- // module. sequence should be an empty HloModuleSequence. Upon return sequence
+ // module. schedule should be an empty HloSchedule. Upon return the schedule
// contains the memory-minimizing order in which to emit the HLO instructions.
- StatusOr<bool> Run(HloModule* module,
- SequentialHloOrdering::HloModuleSequence* sequence,
+ StatusOr<bool> Run(HloModule* module, HloSchedule* schedule,
int64 memory_limit, RematerializationSizes* sizes,
CopyInsertion* copy_insertion);
@@ -98,10 +98,9 @@ class HloRematerialization {
// order in which the computation's instructions will be emitted in the
// backend. Rematerialized instructions will be added to the HLO computation
// and inserted into 'order'.
- StatusOr<bool> RematerializeComputation(
- HloComputation* computation,
- SequentialHloOrdering::HloModuleSequence* sequence,
- int64 computation_memory_limit);
+ StatusOr<bool> RematerializeComputation(HloComputation* computation,
+ HloSchedule* schedule,
+ int64 memory_limit_bytes);
// Computes and returns the peak memory used by the given computation. The
// peak memory is the maximum total size of all live HLO instruction values at
diff --git a/tensorflow/compiler/xla/service/hlo_rematerialization_test.cc b/tensorflow/compiler/xla/service/hlo_rematerialization_test.cc
index ac8c97d380..83cb113bfb 100644
--- a/tensorflow/compiler/xla/service/hlo_rematerialization_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_rematerialization_test.cc
@@ -141,13 +141,13 @@ class HloRematerializationTest : public HloTestBase {
return ShapeUtil::ByteSizeOf(shape, sizeof(void*));
}
- StatusOr<bool> RunHloRematerialization(
- int64 memory_limit_bytes, HloModule* module,
- SequentialHloOrdering::HloModuleSequence* sequence) {
+ StatusOr<bool> RunHloRematerialization(int64 memory_limit_bytes,
+ HloModule* module,
+ HloSchedule* schedule) {
TF_EXPECT_OK(verifier().Run(module).status());
return HloRematerialization::RematerializeAndSchedule(
ByteSizeOf, memory_limit_bytes, module, DefaultMemoryScheduler,
- sequence, /*sizes=*/nullptr);
+ schedule, /*sizes=*/nullptr);
}
// Various shapes used in the canned computations.
@@ -170,12 +170,12 @@ TEST_F(HloRematerializationTest, SingleComputation) {
const HloInstruction* concat = slice->operand(0);
const HloInstruction* bcast = concat->operand(0);
- SequentialHloOrdering::HloModuleSequence sequence;
+ HloSchedule schedule(module.get());
// Computation requires 16KB without rematerialization, but uses only 12KB
// with rematerialization so pick a memory limit between these values (14KB).
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloRematerialization(
/*memory_limit_bytes=*/14 * 1024,
- module.get(), &sequence));
+ module.get(), &schedule));
EXPECT_TRUE(changed);
// Root should not have changed.
@@ -187,9 +187,11 @@ TEST_F(HloRematerializationTest, SingleComputation) {
// The rematerialized broadcast should be immediately before the concat in the
// sequence.
- EXPECT_EQ(sequence.at(computation)[computation->instruction_count() - 2],
+ EXPECT_EQ(schedule.sequence(computation)
+ .instructions()[computation->instruction_count() - 2],
concat);
- EXPECT_EQ(sequence.at(computation)[computation->instruction_count() - 3],
+ EXPECT_EQ(schedule.sequence(computation)
+ .instructions()[computation->instruction_count() - 3],
remat_bcast);
}
@@ -203,10 +205,10 @@ TEST_F(HloRematerializationTest, SingleComputationNoRematerialization) {
EXPECT_EQ(computation->instruction_count(), 8);
- SequentialHloOrdering::HloModuleSequence sequence;
+ HloSchedule schedule(module.get());
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloRematerialization(
/*memory_limit_bytes=*/20 * 1024,
- module.get(), &sequence));
+ module.get(), &schedule));
// No instructions should have been materialized.
EXPECT_FALSE(changed);
@@ -242,10 +244,10 @@ TEST_F(HloRematerializationTest, RematerializeAroundWhile) {
// The body computation uses 16KB and the entry computation uses 2KB at the
// while so the peak memory use of the module is 18KB. Set the memory limit a
// bit lower (17KB) to force rematerialization of the entry computation.
- SequentialHloOrdering::HloModuleSequence sequence;
+ HloSchedule schedule(module.get());
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloRematerialization(
/*memory_limit_bytes=*/17 * 1024,
- module.get(), &sequence));
+ module.get(), &schedule));
EXPECT_TRUE(changed);
// Only the entry computation should have a rematerialized instruction added.
@@ -276,10 +278,10 @@ TEST_F(HloRematerializationTest, RematerializeEntryAndWhileBody) {
EXPECT_EQ(entry_computation->instruction_count(), 7);
EXPECT_EQ(body_computation->instruction_count(), 8);
- SequentialHloOrdering::HloModuleSequence sequence;
+ HloSchedule schedule(module.get());
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloRematerialization(
/*memory_limit_bytes=*/15 * 1024,
- module.get(), &sequence));
+ module.get(), &schedule));
EXPECT_TRUE(changed);
// Both computations should have rematerialized instructions added.
@@ -316,10 +318,10 @@ TEST_F(HloRematerializationTest, RematerializeNestedComputations) {
// If all computations are maximally rematerialized then peak memory usage is
// ~12K so pick something slightly larger.
- SequentialHloOrdering::HloModuleSequence sequence;
+ HloSchedule schedule(module.get());
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloRematerialization(
/*memory_limit_bytes=*/13 * 1024,
- module.get(), &sequence));
+ module.get(), &schedule));
EXPECT_TRUE(changed);
// All computations should have rematerialized instructions added.
@@ -382,14 +384,14 @@ TEST_F(HloRematerializationTest, RngNotRematerialized) {
ASSERT_EQ(count_rngs(entry_computation), 1);
const int64 original_instruction_count =
entry_computation->instruction_count();
- SequentialHloOrdering::HloModuleSequence sequence;
+ HloSchedule schedule(module.get());
// Pick a memory limit somewhere between 24KB (initial peak memory including
// parameter and output) and 20KB (peak memory possible with
// rematerialization).
TF_ASSERT_OK_AND_ASSIGN(
bool changed, RunHloRematerialization(
/*memory_limit_bytes=*/4 * ByteSizeOf(vec1024_shape_),
- module.get(), &sequence));
+ module.get(), &schedule));
EXPECT_TRUE(changed);
// The rng should not have been rematerialized.
EXPECT_EQ(count_rngs(entry_computation), 1);
@@ -476,13 +478,13 @@ TEST_F(HloRematerializationTest, InstructionRematerializedMultipleTimes) {
EXPECT_EQ(add_3->operand(0), bcast);
EXPECT_EQ(add_4->operand(0), bcast);
- SequentialHloOrdering::HloModuleSequence sequence;
+ HloSchedule schedule(module.get());
// Pick a memory limit somewhere between 24KB (initial peak memory including
// parameter and output) and 20KB (peak memory possible with
// rematerialization).
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloRematerialization(
/*memory_limit_bytes=*/22 * 1024,
- module.get(), &sequence));
+ module.get(), &schedule));
EXPECT_TRUE(changed);
// The broadcast should have been rematerialized 3 times.
@@ -571,13 +573,13 @@ TEST_P(IndirectUseTest, IndirectUseNotRematerialized) {
EXPECT_EQ(entry_computation->instruction_count(), 8);
- SequentialHloOrdering::HloModuleSequence sequence;
+ HloSchedule schedule(module.get());
// Pick a memory limit somewhere between 24KB (initial peak memory including
// parameter and output) and 20KB (peak memory possible with
// rematerialization).
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloRematerialization(
/*memory_limit_bytes=*/22 * 1024,
- module.get(), &sequence));
+ module.get(), &schedule));
// Rematerialization should only occur if the rematerializable instruction has
// no indirect uses.
if (indirectly_used) {
diff --git a/tensorflow/compiler/xla/service/hlo_schedule.cc b/tensorflow/compiler/xla/service/hlo_schedule.cc
new file mode 100644
index 0000000000..a65b33bf40
--- /dev/null
+++ b/tensorflow/compiler/xla/service/hlo_schedule.cc
@@ -0,0 +1,291 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/compiler/xla/service/hlo_schedule.h"
+
+#include <queue>
+#include <vector>
+
+#include "absl/strings/str_format.h"
+#include "absl/strings/str_join.h"
+#include "tensorflow/compiler/xla/map_util.h"
+#include "tensorflow/compiler/xla/status_macros.h"
+#include "tensorflow/compiler/xla/util.h"
+#include "tensorflow/core/lib/gtl/map_util.h"
+
+namespace xla {
+
+void HloSchedule::set_sequence(
+ const HloComputation* computation,
+ absl::Span<const HloInstruction* const> sequence) {
+ set_sequence(computation, HloInstructionSequence(sequence));
+}
+
+void HloSchedule::set_sequence(const HloComputation* computation,
+ HloInstructionSequence sequence) {
+ CHECK(computation->parent() == module_);
+ sequences_[computation->unique_id()] = std::move(sequence);
+}
+
+HloInstructionSequence& HloSchedule::GetOrCreateSequence(
+ const HloComputation* computation) {
+ auto it = sequences_.find(computation->unique_id());
+ if (it == sequences_.end()) {
+ // No sequence found for computation. Create and return an empty one.
+ CHECK(computation->parent() == module_);
+ return sequences_[computation->unique_id()];
+ } else {
+ return it->second;
+ }
+}
+
+const HloInstructionSequence& HloSchedule::sequence(
+ const HloComputation* computation) const {
+ return sequences_.at(computation->unique_id());
+}
+
+Status HloSchedule::UpdateComputationSchedule(
+ const HloComputation* computation) {
+ // Map from unique ID to HloInstruction pointer for instructions in the
+ // computation.
+ tensorflow::gtl::FlatMap<int, const HloInstruction*> id_to_instruction;
+ for (const HloInstruction* instruction : computation->instructions()) {
+ InsertOrDie(&id_to_instruction, instruction->unique_id(), instruction);
+ }
+
+ // Set of all HloInstructions in the schedule.
+ tensorflow::gtl::FlatSet<int> ids_in_schedule;
+ for (int id : sequences_.at(computation->unique_id()).ids()) {
+ InsertOrDie(&ids_in_schedule, id);
+ }
+
+ // Map from HloInstruction X to newly added instructions (instruction is in
+ // computation, but not in schedule) which use X. If an instruction is not in
+ // the map, then it has no users which are newly added instructions.
+ tensorflow::gtl::FlatMap<const HloInstruction*,
+ std::vector<const HloInstruction*>>
+ new_instruction_uses;
+
+ // For each newly added instruction, this is the count of the instruction's
+ // operands that have not yet been scheduled. When this value reaches zero,
+ // then the instruction may be placed in the schedule.
+ tensorflow::gtl::FlatMap<const HloInstruction*, int>
+ unscheduled_operand_count;
+
+ // Create a worklist of newly added instructions which are ready to be added
+ // to the schedule. Initialize worklist with those that have zero operands.
+ std::queue<const HloInstruction*> worklist;
+
+ for (const HloInstruction* instruction : computation->instructions()) {
+ if (ids_in_schedule.count(instruction->unique_id()) == 0) {
+ // This is a newly added instruction which is not in the schedule.
+ if (instruction->operands().empty()) {
+ worklist.push(instruction);
+ } else {
+ for (const HloInstruction* operand : instruction->operands()) {
+ new_instruction_uses[operand].push_back(instruction);
+ }
+ unscheduled_operand_count[instruction] = instruction->operand_count();
+ }
+ }
+ }
+
+ // Update the schedule with the newly added instructions, and remove any
+ // instructions no longer in the graph.
+ HloInstructionSequence new_sequence;
+
+ // Lambda which schedules all instructions on the worklist.
+ auto schedule_worklist = [&]() {
+ while (!worklist.empty()) {
+ const HloInstruction* instruction = worklist.front();
+ worklist.pop();
+ new_sequence.push_back(instruction);
+ std::vector<const HloInstruction*>* new_users =
+ tensorflow::gtl::FindOrNull(new_instruction_uses, instruction);
+ if (new_users != nullptr) {
+ // This just-scheduled instruction has users which are newly added to
+ // the module. Update the number of unscheduled operands and push the
+ // newly added instruction to the worklist if it is ready to
+ // schedule.
+ for (const HloInstruction* new_user : *new_users) {
+ unscheduled_operand_count.at(new_user)--;
+ CHECK_GE(unscheduled_operand_count.at(new_user), 0);
+ if (unscheduled_operand_count.at(new_user) == 0) {
+ worklist.push(new_user);
+ }
+ }
+ }
+ }
+ };
+
+ schedule_worklist();
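+ // Walk the old sequence in order: every instruction still present in the
+ // module keeps its relative position, and newly added instructions are
+ // scheduled (via the worklist) as soon as all of their operands have been
+ // placed.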
+ for (int id : sequences_.at(computation->unique_id()).ids()) {
+ auto it = id_to_instruction.find(id);
+ if (it == id_to_instruction.end()) {
+ // This instruction in the schedule is no longer in the module. Do not add
+ // it to the new schedule.
+ continue;
+ }
+ worklist.push(it->second);
+ schedule_worklist();
+ }
+
+ set_sequence(computation, std::move(new_sequence));
+ return Status::OK();
+}
+
+Status HloSchedule::Update() {
+ // The schedule must contain a sequence for every non-fusion computation in
+ // the module, but can have sequences for computations which no longer exist
+ // (these are removed).
+ std::vector<HloComputation*> nonfusion_computations =
+ module_->MakeNonfusionComputations();
+ for (const HloComputation* computation : nonfusion_computations) {
+ TF_RET_CHECK(sequences_.count(computation->unique_id()) == 1)
+ << "Computation " << computation->name() << " not in HloSchedule.";
+ }
+ if (sequences_.size() > nonfusion_computations.size()) {
+ // Schedule contains some computations which have been removed from the
+ // HloModule. Remove them from the schedule as well.
+ tensorflow::gtl::FlatSet<int64> nonfusion_computations_ids;
+ for (const HloComputation* computation : nonfusion_computations) {
+ nonfusion_computations_ids.insert(computation->unique_id());
+ }
+ for (auto it = sequences_.begin(); it != sequences_.end();) {
+ if (nonfusion_computations_ids.count(it->first) == 0) {
+ it = sequences_.erase(it);
+ } else {
+ it++;
+ }
+ }
+ }
+ CHECK_EQ(sequences_.size(), nonfusion_computations.size());
+
+ for (const HloComputation* computation : nonfusion_computations) {
+ TF_RETURN_IF_ERROR(UpdateComputationSchedule(computation));
+ }
+
+ TF_RETURN_IF_ERROR(Verify());
+ return Status::OK();
+}
+
+Status HloSchedule::Verify() const {
+ VLOG(2) << "VerifySchedule()";
+ XLA_VLOG_LINES(3, module_->ToString());
+ XLA_VLOG_LINES(2, ToString());
+
+ // Verify that the schedule contains exactly the same set of non-fusion
+ // computations as the module currently does.
+ std::vector<HloComputation*> nonfusion_computations =
+ module_->MakeNonfusionComputations();
+ TF_RET_CHECK(nonfusion_computations.size() == sequences_.size())
+ << "Schedule has " << sequences_.size() << " sequences, but module has "
+ << nonfusion_computations.size() << " non-fusion computations";
+ for (const HloComputation* computation : nonfusion_computations) {
+ TF_RET_CHECK(sequences_.count(computation->unique_id()) == 1)
+ << "Computation " << computation->name()
+ << " missing from HLO schedule.";
+ }
+
+ // For each computation verify the set of instructions is the same and that
+ // each dependency and control edge is honored.
+ for (const HloComputation* computation : nonfusion_computations) {
+ tensorflow::gtl::FlatMap<const HloInstruction*, int> instruction_position;
+ int pos = 0;
+ for (const HloInstruction* instruction :
+ sequence(computation).instructions()) {
+ TF_RET_CHECK(instruction_position.insert({instruction, pos}).second)
+ << "Instruction " << instruction->name()
+ << " appears more than once in the schedule";
+ pos++;
+ }
+
+ TF_RET_CHECK(instruction_position.size() ==
+ computation->instruction_count());
+ for (const HloInstruction* instruction : computation->instructions()) {
+ TF_RET_CHECK(instruction_position.count(instruction) == 1)
+ << "Instruction " << instruction->name() << " is not in schedule";
+ }
+
+ for (const HloInstruction* instruction : computation->instructions()) {
+ for (const HloInstruction* operand : instruction->operands()) {
+ TF_RET_CHECK(instruction_position.at(operand) <
+ instruction_position.at(instruction))
+ << "Instruction " << instruction->name()
+ << " is not scheduled after its operand " << operand->name();
+ }
+
+ for (const HloInstruction* pred : instruction->control_predecessors()) {
+ TF_RET_CHECK(instruction_position.at(pred) <
+ instruction_position.at(instruction))
+ << "Instruction " << instruction->name()
+ << " is not scheduled after its control predecessor "
+ << pred->name();
+ }
+ }
+ }
+
+ return Status::OK();
+}
+
+namespace {
+
+// Returns the computation in the given module with the given unique ID. Returns
+// nullptr if no such computation exists.
+const HloComputation* IdToComputation(const HloModule* module, int64 id) {
+ for (const HloComputation* computation : module->computations()) {
+ if (computation->unique_id() == id) {
+ return computation;
+ }
+ }
+ return nullptr;
+}
+
+} // namespace
+
+string HloSchedule::ToString() const {
+ std::vector<string> pieces;
+
+ pieces.push_back("HloSchedule");
+ for (const auto& id_sequence : sequences_) {
+ const HloComputation* computation =
+ IdToComputation(module_, id_sequence.first);
+ if (computation == nullptr) {
+ // The computation is not in the module and may have been deleted, so it is
+ // not safe to dereference any HLO pointers. Just use the HLO unique ids
+ // stored in this object.
+ pieces.push_back(
+ absl::StrFormat("computation with id %d (no longer in HLO module):",
+ id_sequence.first));
+ for (int id : id_sequence.second.ids()) {
+ pieces.push_back(absl::StrCat(" ", id));
+ }
+ } else {
+ pieces.push_back(absl::StrFormat("computation %s:", computation->name()));
+ for (const HloInstruction* instruction :
+ id_sequence.second.instructions()) {
+ pieces.push_back(absl::StrCat(" ", instruction->name()));
+ }
+ }
+ }
+ return absl::StrJoin(pieces, "\n");
+}
+
+std::ostream& operator<<(std::ostream& out, const HloSchedule& schedule) {
+ out << schedule.ToString();
+ return out;
+}
+
+} // namespace xla
diff --git a/tensorflow/compiler/xla/service/hlo_schedule.h b/tensorflow/compiler/xla/service/hlo_schedule.h
new file mode 100644
index 0000000000..21c6988638
--- /dev/null
+++ b/tensorflow/compiler/xla/service/hlo_schedule.h
@@ -0,0 +1,151 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_HLO_SCHEDULE_H_
+#define TENSORFLOW_COMPILER_XLA_SERVICE_HLO_SCHEDULE_H_
+
+#include <vector>
+
+#include "absl/types/span.h"
+#include "tensorflow/compiler/xla/service/hlo_computation.h"
+#include "tensorflow/compiler/xla/service/hlo_instruction.h"
+#include "tensorflow/compiler/xla/service/hlo_module.h"
+#include "tensorflow/compiler/xla/service/hlo_schedule.h"
+#include "tensorflow/compiler/xla/status.h"
+
+namespace xla {
+
+// Class representing a sequence of HLO instructions such as the sequential
+// execution order of an HLO computation.
+class HloInstructionSequence {
+ public:
+ HloInstructionSequence() = default;
+ explicit HloInstructionSequence(
+ absl::Span<const HloInstruction* const> instructions) {
+ for (const HloInstruction* instruction : instructions) {
+ push_back(instruction);
+ }
+ }
+
+ // Adds the instruction to the end of the sequence.
+ void push_back(const HloInstruction* instruction) {
+ instruction_sequence_.push_back(instruction);
+ id_sequence_.push_back(instruction->unique_id());
+ }
+
+ // Clears the sequence of all instructions.
+ void clear() {
+ instruction_sequence_.clear();
+ id_sequence_.clear();
+ }
+
+ int64 size() const { return instruction_sequence_.size(); }
+
+ // Returns the sequence of HLO instructions.
+ const std::vector<const HloInstruction*>& instructions() const {
+ return instruction_sequence_;
+ }
+
+ // Returns the unique IDs of the instructions in the sequence (in order).
+ const std::vector<int>& ids() const { return id_sequence_; }
+
+ private:
+ // The sequence as HloInstructions.
+ std::vector<const HloInstruction*> instruction_sequence_;
+
+ // The sequence of HLO instructions, represented by their unique IDs. The
+ // sequence is stored as both HloInstructions and unique IDs because the
+ // sequence may be referenced after transformations to the HLO graph and HLO
+ // pointers can be invalidated or recycled in this process (see
+ // HloSchedule::Update).
+ std::vector<int> id_sequence_;
+};
+
+// A class representing a sequential schedule of instructions for an HLO
+// module. A complete HLO schedule contains an instruction sequence for every
+// non-fusion computation in the HLO module.
+class HloSchedule {
+ public:
+ explicit HloSchedule(const HloModule* module) : module_(module) {}
+
+ // Returns a reference to the sequence for the given computation.
+ const HloInstructionSequence& sequence(
+ const HloComputation* computation) const;
+
+ // Returns the sequence for the given computation. An empty sequence is
+ // created if none exists for the computation.
+ HloInstructionSequence& GetOrCreateSequence(
+ const HloComputation* computation);
+
+ // Sets the sequence for the given computation to the given sequence.
+ void set_sequence(const HloComputation* computation,
+ absl::Span<const HloInstruction* const> sequence);
+ void set_sequence(const HloComputation* computation,
+ HloInstructionSequence sequence);
+
+ // Returns a map from HloComputation unique ID to instruction sequence. The
+ // map contains all sequences in the schedule.
+ const tensorflow::gtl::FlatMap<int64, HloInstructionSequence>& sequences()
+ const {
+ return sequences_;
+ }
+
+ // Returns true if the schedule has a sequence for the given computation.
+ bool is_computation_scheduled(const HloComputation* computation) const {
+ return sequences_.count(computation->unique_id()) == 1;
+ }
+
+ // Updates the schedule such that it is (again) a valid schedule for the
+ // module. This is used to update a schedule after the HLO module has been
+ // transformed in some way. In general, the only transformations to the module
+ // for which a schedule can be updated are the addition or removal of
+ // instructions and the removal of computations. Updating the schedule after
+ // adding new dependencies between existing instructions is not supported and
+ // may result in an error status being returned.
+ //
+ // Instructions in the module which also exist in the given schedule will
+ // remain in the same order in the updated schedule. Instructions which exist
+ // in the module but not in the given schedule will be placed as early as
+ // possible in the updated schedule.
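+ //
+ // For example (a sketch): if a computation's sequence is {p0, p1, add} and a
+ // new instruction neg = negate(add) is made the root, Update() yields
+ // {p0, p1, add, neg}; if add were instead removed from the module, Update()
+ // would drop it from the sequence.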
+ Status Update();
+
+ // Verifies that the given schedule is valid for the given module.
+ // Specifically, the schedule contains exactly the instructions in the
+ // non-fusion computations in the module and every dependency in the module is
+ // satisfied in the schedule.
+ Status Verify() const;
+
+ string ToString() const;
+
+ bool empty() const { return sequences_.empty(); }
+
+ const HloModule* module() const { return module_; }
+
+ private:
+ // Updates the instruction sequence for the given computation.
+ Status UpdateComputationSchedule(const HloComputation* computation);
+
+ const HloModule* module_;
+
+ // A map from computation unique ID to instruction sequence. Unique IDs are
+ // used rather than HloComputation pointers because computation pointers are
+ // not stable across HLO transformations; a pointer may be recycled.
+ tensorflow::gtl::FlatMap<int64, HloInstructionSequence> sequences_;
+};
+
+std::ostream& operator<<(std::ostream& out, const HloSchedule& schedule);
+
+} // namespace xla
+
+#endif // TENSORFLOW_COMPILER_XLA_SERVICE_HLO_SCHEDULE_H_
diff --git a/tensorflow/compiler/xla/service/hlo_schedule_test.cc b/tensorflow/compiler/xla/service/hlo_schedule_test.cc
new file mode 100644
index 0000000000..eb52582bb5
--- /dev/null
+++ b/tensorflow/compiler/xla/service/hlo_schedule_test.cc
@@ -0,0 +1,341 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/compiler/xla/service/hlo_schedule.h"
+
+#include <memory>
+#include <string>
+
+#include "absl/algorithm/container.h"
+#include "tensorflow/compiler/xla/service/hlo_computation.h"
+#include "tensorflow/compiler/xla/service/hlo_dce.h"
+#include "tensorflow/compiler/xla/service/hlo_instruction.h"
+#include "tensorflow/compiler/xla/service/hlo_opcode.h"
+#include "tensorflow/compiler/xla/service/hlo_ordering.h"
+#include "tensorflow/compiler/xla/service/hlo_parser.h"
+#include "tensorflow/compiler/xla/service/hlo_scheduling.h"
+#include "tensorflow/compiler/xla/shape_util.h"
+#include "tensorflow/compiler/xla/tests/hlo_test_base.h"
+#include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/lib/core/status_test_util.h"
+
+namespace xla {
+namespace {
+
+class HloScheduleTest : public HloTestBase {};
+
+TEST_F(HloScheduleTest, UpdateScheduleUnchangedModule) {
+ // Updating the schedule of an unchanged HLO module should not affect the
+ // schedule at all.
+ const string module_str = R"(
+HloModule UpdateScheduleUnchanged
+
+ENTRY main {
+ a = f32[] parameter(0)
+ b = f32[] parameter(1)
+ c = f32[] constant(42.0)
+ sum = f32[] add(a, b)
+ neg = f32[] negate(c)
+ ROOT root = f32[] multiply(sum, neg)
+}
+)";
+ TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
+ ParseHloString(module_str));
+ TF_ASSERT_OK_AND_ASSIGN(
+ HloSchedule schedule,
+ ScheduleModule(*module, [](const BufferValue& buffer) {
+ return ShapeUtil::ByteSizeOf(buffer.shape());
+ }));
+ const std::vector<const HloInstruction*> entry_schedule =
+ schedule.sequence(module->entry_computation()).instructions();
+
+ EXPECT_EQ(entry_schedule.size(), 6);
+
+ TF_ASSERT_OK(schedule.Update());
+ TF_ASSERT_OK(schedule.Verify());
+
+ EXPECT_EQ(entry_schedule,
+ schedule.sequence(module->entry_computation()).instructions());
+}
+
+TEST_F(HloScheduleTest, UpdateScheduleWithNewInstructions) {
+ // Add some additional instructions to a module and verify the schedule can be
+ // updated.
+ const string module_str = R"(
+HloModule UpdateScheduleWithNewInstructions
+
+ENTRY main {
+ a = f32[] parameter(0)
+ b = f32[] parameter(1)
+ c = f32[] constant(42.0)
+ sum = f32[] add(a, b)
+ neg = f32[] negate(c)
+ ROOT root = f32[] multiply(sum, neg)
+}
+)";
+ TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
+ ParseHloString(module_str));
+ TF_ASSERT_OK_AND_ASSIGN(
+ HloSchedule schedule,
+ ScheduleModule(*module, [](const BufferValue& buffer) {
+ return ShapeUtil::ByteSizeOf(buffer.shape());
+ }));
+
+ HloComputation* entry = module->entry_computation();
+ const Shape shape = entry->root_instruction()->shape();
+ HloInstruction* constant = entry->AddInstruction(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
+ HloInstruction* sub = entry->AddInstruction(HloInstruction::CreateBinary(
+ shape, HloOpcode::kSubtract, constant, entry->root_instruction()));
+ entry->set_root_instruction(sub);
+
+ auto in_schedule = [&](const HloInstruction* hlo) {
+ return absl::c_linear_search(schedule.sequence(entry).instructions(), hlo);
+ };
+
+ EXPECT_EQ(schedule.sequence(entry).size(), 6);
+ EXPECT_FALSE(in_schedule(constant));
+ EXPECT_FALSE(in_schedule(sub));
+
+ ASSERT_IS_NOT_OK(schedule.Verify());
+ TF_ASSERT_OK(schedule.Update());
+ TF_ASSERT_OK(schedule.Verify());
+
+ EXPECT_EQ(schedule.sequence(entry).size(), 8);
+ EXPECT_TRUE(in_schedule(constant));
+ EXPECT_TRUE(in_schedule(sub));
+}
+
+TEST_F(HloScheduleTest, UpdateScheduleWithAddedAndDeletedInstruction) {
+ // Add and delete some instructions from a module and verify that the schedule
+ // can be updated successfully.
+ const string module_str = R"(
+HloModule UpdateScheduleWithAddedAndDeletedInstruction
+
+ENTRY main {
+ a = f32[] parameter(0)
+ b = f32[] parameter(1)
+ c = f32[] constant(42.0)
+ sum = f32[] add(a, b)
+ neg = f32[] negate(c)
+ ROOT root = f32[] multiply(sum, neg)
+}
+)";
+
+ TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
+ ParseHloString(module_str));
+ TF_ASSERT_OK_AND_ASSIGN(
+ HloSchedule schedule,
+ ScheduleModule(*module, [](const BufferValue& buffer) {
+ return ShapeUtil::ByteSizeOf(buffer.shape());
+ }));
+
+ // Set the entry root to some expression containing just a parameter and a
+ // constant.
+ HloComputation* entry = module->entry_computation();
+ HloInstruction* constant = entry->AddInstruction(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
+ HloInstruction* new_root = entry->AddInstruction(
+ HloInstruction::CreateBinary(constant->shape(), HloOpcode::kSubtract,
+ constant, entry->parameter_instruction(0)));
+ entry->set_root_instruction(new_root);
+
+ // DCE should remove everything but the parameters and the newly added code.
+ HloDCE dce;
+ TF_ASSERT_OK(dce.Run(module.get()).status());
+
+ EXPECT_EQ(schedule.sequence(entry).size(), 6);
+
+ ASSERT_IS_NOT_OK(schedule.Verify());
+ TF_ASSERT_OK(schedule.Update());
+ TF_ASSERT_OK(schedule.Verify());
+
+ EXPECT_EQ(schedule.sequence(entry).size(), 4);
+}
+
+TEST_F(HloScheduleTest, UpdateScheduleWithCompletelyReplacedModule) {
+ // Completely replace a module with an entirely new set of instructions and
+ // verify that the schedule can be updated successfully.
+ const string module_str = R"(
+HloModule UpdateScheduleWithCompletelyReplacedModule
+
+ENTRY main {
+ a = f32[] constant(42.0)
+ b = f32[] constant(123.0)
+ ROOT sum = f32[] add(a, b)
+}
+)";
+
+ TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
+ ParseHloString(module_str));
+ TF_ASSERT_OK_AND_ASSIGN(
+ HloSchedule schedule,
+ ScheduleModule(*module, [](const BufferValue& buffer) {
+ return ShapeUtil::ByteSizeOf(buffer.shape());
+ }));
+
+ // Replace the entry computation with the negation of a constant.
+ HloComputation* entry = module->entry_computation();
+ HloInstruction* constant = entry->AddInstruction(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
+ HloInstruction* new_root = entry->AddInstruction(HloInstruction::CreateUnary(
+ constant->shape(), HloOpcode::kNegate, constant));
+ entry->set_root_instruction(new_root);
+
+ // DCE the old instructions.
+ HloDCE dce;
+ TF_ASSERT_OK(dce.Run(module.get()).status());
+
+ EXPECT_EQ(schedule.sequence(entry).size(), 3);
+
+ ASSERT_IS_NOT_OK(schedule.Verify());
+ TF_ASSERT_OK(schedule.Update());
+ TF_ASSERT_OK(schedule.Verify());
+
+ EXPECT_EQ(schedule.sequence(entry).size(), 2);
+}
+
+TEST_F(HloScheduleTest, UpdateScheduleWithMultipleComputations) {
+ // Make changes to more than one computation in an HLO module and verify
+ // that the schedule can be updated.
+ const string module_str = R"(
+HloModule UpdateScheduleWithMultipleComputations
+
+%Body (param.1: (s32[], token[])) -> (s32[], token[]) {
+ %param.1 = (s32[], token[]) parameter(0)
+ %get-tuple-element.1 = s32[] get-tuple-element((s32[], token[]) %param.1), index=0
+ %constant.1 = s32[] constant(1)
+ %add = s32[] add(s32[] %get-tuple-element.1, s32[] %constant.1)
+ %get-tuple-element.2 = token[] get-tuple-element((s32[], token[]) %param.1), index=1
+ %after-all = token[] after-all(token[] %get-tuple-element.2)
+ ROOT %tuple = (s32[], token[]) tuple(s32[] %add, token[] %after-all)
+}
+
+%Cond (param: (s32[], token[])) -> pred[] {
+ %param = (s32[], token[]) parameter(0)
+ %get-tuple-element = s32[] get-tuple-element((s32[], token[]) %param), index=0
+ %constant = s32[] constant(42)
+ ROOT %less-than = pred[] less-than(s32[] %get-tuple-element, s32[] %constant)
+}
+
+ENTRY %WhileLoop () -> s32[] {
+ %zero = s32[] constant(0)
+ %init_token = token[] after-all()
+ %init_tuple = (s32[], token[]) tuple(s32[] %zero, token[] %init_token)
+ %while = (s32[], token[]) while((s32[], token[]) %init_tuple), condition=%Cond, body=%Body
+ ROOT %root = s32[] get-tuple-element((s32[], token[]) %while), index=0
+}
+)";
+
+ TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
+ ParseHloString(module_str));
+ TF_ASSERT_OK_AND_ASSIGN(
+ HloSchedule schedule,
+ ScheduleModule(*module, [](const BufferValue& buffer) {
+ return ShapeUtil::ByteSizeOf(buffer.shape(),
+ /*pointer_size=*/sizeof(void*));
+ }));
+
+ const HloInstruction* xla_while =
+ module->entry_computation()->root_instruction()->operand(0);
+ HloComputation* body = xla_while->while_body();
+ HloComputation* cond = xla_while->while_condition();
+
+ // Logically negate (kNot) the root of the cond.
+ cond->set_root_instruction(cond->AddInstruction(
+ HloInstruction::CreateUnary(ShapeUtil::MakeShape(PRED, {}),
+ HloOpcode::kNot, cond->root_instruction())));
+
+ // Replace the body with a computation which just passes through its
+ // parameter.
+ body->set_root_instruction(body->parameter_instruction(0));
+
+ // DCE the dead code in the body.
+ HloDCE dce;
+ TF_ASSERT_OK(dce.Run(module.get()).status());
+
+ EXPECT_EQ(schedule.sequence(body).size(), 7);
+ EXPECT_EQ(schedule.sequence(cond).size(), 4);
+
+ ASSERT_IS_NOT_OK(schedule.Verify());
+ TF_ASSERT_OK(schedule.Update());
+ TF_ASSERT_OK(schedule.Verify());
+
+ EXPECT_EQ(schedule.sequence(body).size(), 1);
+ EXPECT_EQ(schedule.sequence(cond).size(), 5);
+}
+
+TEST_F(HloScheduleTest, UpdateScheduleComputationRemoved) {
+ // Remove computations from a module and verify the schedule can be updated.
+ const string module_str = R"(
+HloModule UpdateScheduleComputationRemoved
+
+%Body (param.1: (s32[], token[])) -> (s32[], token[]) {
+ %param.1 = (s32[], token[]) parameter(0)
+ %get-tuple-element.1 = s32[] get-tuple-element((s32[], token[]) %param.1), index=0
+ %constant.1 = s32[] constant(1)
+ %add = s32[] add(s32[] %get-tuple-element.1, s32[] %constant.1)
+ %get-tuple-element.2 = token[] get-tuple-element((s32[], token[]) %param.1), index=1
+ %after-all = token[] after-all(token[] %get-tuple-element.2)
+ ROOT %tuple = (s32[], token[]) tuple(s32[] %add, token[] %after-all)
+}
+
+%Cond (param: (s32[], token[])) -> pred[] {
+ %param = (s32[], token[]) parameter(0)
+ %get-tuple-element = s32[] get-tuple-element((s32[], token[]) %param), index=0
+ %constant = s32[] constant(42)
+ ROOT %less-than = pred[] less-than(s32[] %get-tuple-element, s32[] %constant)
+}
+
+ENTRY %WhileLoop () -> s32[] {
+ %zero = s32[] constant(0)
+ %init_token = token[] after-all()
+ %init_tuple = (s32[], token[]) tuple(s32[] %zero, token[] %init_token)
+ %while = (s32[], token[]) while((s32[], token[]) %init_tuple), condition=%Cond, body=%Body
+ ROOT %root = s32[] get-tuple-element((s32[], token[]) %while), index=0
+}
+)";
+
+ TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
+ ParseHloString(module_str));
+ TF_ASSERT_OK_AND_ASSIGN(
+ HloSchedule schedule,
+ ScheduleModule(*module, [](const BufferValue& buffer) {
+ return ShapeUtil::ByteSizeOf(buffer.shape(),
+ /*pointer_size=*/sizeof(void*));
+ }));
+
+ HloInstruction* xla_while =
+ module->entry_computation()->root_instruction()->mutable_operand(0);
+ HloInstruction* init = xla_while->mutable_operand(0);
+
+ // Replace the while with its init value. The conditional and body
+ // computations should then be dead.
+ TF_ASSERT_OK(xla_while->ReplaceAllUsesWith(init));
+
+ // Run DCE. The dead while instruction is removed, and with it the now-unused
+ // body and condition computations.
+ HloDCE dce;
+ ASSERT_EQ(module->computation_count(), 3);
+ TF_ASSERT_OK(dce.Run(module.get()).status());
+ ASSERT_EQ(module->computation_count(), 1);
+
+ ASSERT_IS_NOT_OK(schedule.Verify());
+ TF_ASSERT_OK(schedule.Update());
+ TF_ASSERT_OK(schedule.Verify());
+}
+
+} // namespace
+} // namespace xla
diff --git a/tensorflow/compiler/xla/service/hlo_scheduling.cc b/tensorflow/compiler/xla/service/hlo_scheduling.cc
index 0fc3b268c0..9bfb0af96c 100644
--- a/tensorflow/compiler/xla/service/hlo_scheduling.cc
+++ b/tensorflow/compiler/xla/service/hlo_scheduling.cc
@@ -70,7 +70,7 @@ class ListScheduler {
public:
// Construct and return a memory-minimizing sequence of HLO instructions
// containing the given HLO computation.
- static StatusOr<std::vector<const HloInstruction*>> Run(
+ static StatusOr<HloInstructionSequence> Run(
const HloComputation& computation,
const TuplePointsToAnalysis& points_to_analysis,
const LogicalBuffer::SizeFunction& size_function,
@@ -229,8 +229,8 @@ class ListScheduler {
return {BytesFreedIfScheduled(entry), entry.instruction->user_count()};
}
- std::vector<const HloInstruction*> CreateSchedule() {
- std::vector<const HloInstruction*> schedule;
+ HloInstructionSequence CreateSchedule() {
+ HloInstructionSequence schedule;
// Populate the ready list with instructions which have no operands or
// control predecessors.
@@ -374,7 +374,7 @@ int64 SumLogicalBufferSizes(
return size;
}
-StatusOr<std::vector<const HloInstruction*>> ScheduleComputationHelper(
+StatusOr<HloInstructionSequence> ScheduleComputationHelper(
const HloComputation& computation,
const TuplePointsToAnalysis& points_to_analysis,
const LogicalBuffer::SizeFunction& size_function,
@@ -392,7 +392,7 @@ StatusOr<std::vector<const HloInstruction*>> ScheduleComputationHelper(
} // namespace
-StatusOr<std::vector<const HloInstruction*>> DFSMemoryScheduler(
+StatusOr<HloInstructionSequence> DFSMemoryScheduler(
const HloComputation& computation,
const TuplePointsToAnalysis& points_to_analysis,
const LogicalBuffer::SizeFunction& size_function,
@@ -443,7 +443,7 @@ StatusOr<std::vector<const HloInstruction*>> DFSMemoryScheduler(
// Construct a total order based on DFS post-order, visiting operands in
// decreasing cumulative extra user order, and next by cumulative size, with a
// tiebreaker by name for determinism.
- std::vector<const HloInstruction*> sequence;
+ HloInstructionSequence sequence;
FunctionVisitor visitor([&sequence](HloInstruction* hlo) {
sequence.push_back(hlo);
return Status::OK();
@@ -463,7 +463,7 @@ StatusOr<std::vector<const HloInstruction*>> DFSMemoryScheduler(
return sequence;
} // namespace xla
-StatusOr<std::vector<const HloInstruction*>> ListMemoryScheduler(
+StatusOr<HloInstructionSequence> ListMemoryScheduler(
const HloComputation& computation,
const TuplePointsToAnalysis& points_to_analysis,
const LogicalBuffer::SizeFunction& size_function,
@@ -473,18 +473,16 @@ StatusOr<std::vector<const HloInstruction*>> ListMemoryScheduler(
memory_by_computation);
}
-StatusOr<std::vector<const HloInstruction*>> PostOrderMemoryScheduler(
+StatusOr<HloInstructionSequence> PostOrderMemoryScheduler(
const HloComputation& computation,
const TuplePointsToAnalysis& points_to_analysis,
const LogicalBuffer::SizeFunction& size_function,
const tensorflow::gtl::FlatMap<const HloComputation*, int64>&
memory_by_computation) {
- const auto& post_order = computation.MakeInstructionPostOrder();
- return std::vector<const HloInstruction*>{post_order.begin(),
- post_order.end()};
+ return HloInstructionSequence(computation.MakeInstructionPostOrder());
}
-StatusOr<std::vector<const HloInstruction*>> DefaultMemoryScheduler(
+StatusOr<HloInstructionSequence> DefaultMemoryScheduler(
const HloComputation& computation,
const TuplePointsToAnalysis& points_to_analysis,
const LogicalBuffer::SizeFunction& size_function,
@@ -499,7 +497,7 @@ StatusOr<std::vector<const HloInstruction*>> DefaultMemoryScheduler(
// List wins for most of our benchmarks; postorder-based schedulers win for
// some RNNs.
TF_ASSIGN_OR_RETURN(
- std::vector<const HloInstruction*> list_sequence,
+ HloInstructionSequence list_sequence,
ListMemoryScheduler(computation, points_to_analysis, size_function,
memory_by_computation));
TF_ASSIGN_OR_RETURN(const int64 list_memory,
@@ -508,7 +506,7 @@ StatusOr<std::vector<const HloInstruction*>> DefaultMemoryScheduler(
size_function, &memory_by_computation));
VLOG(2) << "Min-memory list sequence: " << HumanReadableNumBytes(list_memory);
- TF_ASSIGN_OR_RETURN(std::vector<const HloInstruction*> dfs_sequence,
+ TF_ASSIGN_OR_RETURN(HloInstructionSequence dfs_sequence,
DFSMemoryScheduler(computation, points_to_analysis,
size_function, memory_by_computation));
TF_ASSIGN_OR_RETURN(const int64 dfs_memory,
@@ -518,7 +516,7 @@ StatusOr<std::vector<const HloInstruction*>> DefaultMemoryScheduler(
VLOG(2) << "Min-memory dfs sequence: " << HumanReadableNumBytes(dfs_memory);
TF_ASSIGN_OR_RETURN(
- std::vector<const HloInstruction*> post_order_sequence,
+ HloInstructionSequence post_order_sequence,
PostOrderMemoryScheduler(computation, points_to_analysis, size_function,
memory_by_computation));
TF_ASSIGN_OR_RETURN(const int64 post_order_memory,
@@ -545,32 +543,35 @@ StatusOr<std::vector<const HloInstruction*>> DefaultMemoryScheduler(
}
}
-StatusOr<SequentialHloOrdering::HloModuleSequence> ScheduleComputationsInModule(
+StatusOr<HloSchedule> ScheduleModule(
const HloModule& module, const LogicalBuffer::SizeFunction& size_function,
const MemorySchedulerAlgorithm& algorithm) {
- SequentialHloOrdering::HloModuleSequence sequence;
+ HloSchedule schedule(&module);
TF_ASSIGN_OR_RETURN(std::unique_ptr<TuplePointsToAnalysis> points_to_analysis,
TuplePointsToAnalysis::Run(&module));
tensorflow::gtl::FlatMap<const HloComputation*, int64> memory_by_computation;
for (const auto* computation : module.MakeComputationPostOrder()) {
if (!computation->IsFusionComputation()) {
- TF_ASSIGN_OR_RETURN(auto one_computation_sequence,
+ TF_ASSIGN_OR_RETURN(HloInstructionSequence computation_sequence,
ScheduleComputationHelper(
*computation, *points_to_analysis, size_function,
algorithm, memory_by_computation));
memory_by_computation[computation] =
HeapSimulator::MinimumMemoryForComputation(
- *computation, one_computation_sequence, *points_to_analysis,
+ *computation, computation_sequence, *points_to_analysis,
size_function, &memory_by_computation)
.ValueOrDie();
- sequence[computation] = std::move(one_computation_sequence);
+ schedule.set_sequence(computation, std::move(computation_sequence));
}
}
- VLOG(1) << "Module schedule:\n" << sequence;
- return sequence;
+ VLOG(1) << "Module schedule:\n" << schedule;
+
+ TF_RETURN_IF_ERROR(schedule.Verify());
+
+ return std::move(schedule);
}
-StatusOr<std::vector<const HloInstruction*>> ScheduleOneComputation(
+StatusOr<HloInstructionSequence> ScheduleComputation(
const HloComputation& computation,
const LogicalBuffer::SizeFunction& size_function) {
CHECK(!computation.IsFusionComputation());
@@ -581,187 +582,4 @@ StatusOr<std::vector<const HloInstruction*>> ScheduleOneComputation(
size_function, nullptr, empty_map);
}
-tensorflow::gtl::FlatMap<const HloComputation*, std::vector<int>>
-ComputeIdSchedule(const SequentialHloOrdering::HloModuleSequence& sequence) {
- tensorflow::gtl::FlatMap<const HloComputation*, std::vector<int>> id_sequence;
- for (const auto& computation_sequence : sequence) {
- for (const HloInstruction* instruction : computation_sequence.second) {
- id_sequence[computation_sequence.first].push_back(
- instruction->unique_id());
- }
- }
- return id_sequence;
-}
-
-Status UpdateSchedule(
- const HloModule& module,
- const tensorflow::gtl::FlatMap<const HloComputation*, std::vector<int>>&
- id_sequence,
- SequentialHloOrdering::HloModuleSequence* sequence) {
- // Map from unique ID to HloInstruction pointer for instructions in the
- // module.
- tensorflow::gtl::FlatMap<int, const HloInstruction*> id_to_instruction;
- // Set of all HloInstructions in the schedule.
- tensorflow::gtl::FlatSet<int> ids_in_schedule;
- std::vector<HloComputation*> nonfusion_computations =
- module.MakeNonfusionComputations();
- for (const HloComputation* computation : nonfusion_computations) {
- for (const HloInstruction* instruction : computation->instructions()) {
- TF_RET_CHECK(
- id_to_instruction.insert({instruction->unique_id(), instruction})
- .second);
- }
- for (int id : id_sequence.at(computation)) {
- ids_in_schedule.insert(id);
- }
- }
-
- // Map from HloInstruction X to newly added instructions (instruction is in
- // module, but not in schedule) which use X. If an instruction is not in the
- // map, then it has no users which are newly added instructions.
- tensorflow::gtl::FlatMap<const HloInstruction*,
- std::vector<const HloInstruction*>>
- new_instruction_uses;
-
- // For each newly added instruction, this is the count of the instruction's
- // operands that have not yet been scheduled. When this value reaches zero,
- // then the instruction may be placed in the schedule.
- tensorflow::gtl::FlatMap<const HloInstruction*, int>
- unscheduled_operand_count;
- // For each computation, this is the set of newly added instructions which
- // have no operands. These must be handled specially and are added to the
- // beginning of the schedule.
- tensorflow::gtl::FlatMap<const HloComputation*,
- std::vector<const HloInstruction*>>
- new_zero_operand_instructions;
- for (const HloComputation* computation : nonfusion_computations) {
- new_zero_operand_instructions[computation] = {};
- for (const HloInstruction* instruction : computation->instructions()) {
- if (ids_in_schedule.count(instruction->unique_id()) == 0) {
- // This is a newly added instruction which is not in the schedule.
- for (const HloInstruction* operand : instruction->operands()) {
- new_instruction_uses[operand].push_back(instruction);
- }
- if (instruction->operands().empty()) {
- new_zero_operand_instructions[computation].push_back(instruction);
- }
- unscheduled_operand_count[instruction] = instruction->operand_count();
- }
- }
- }
-
- // Update the schedule with the newly added instructions, and remove any
- // instructions no longer in the graph.
- for (const HloComputation* computation : nonfusion_computations) {
- std::vector<const HloInstruction*> old_computation_sequence =
- std::move(sequence->at(computation));
- sequence->at(computation).clear();
-
- // Create a worklist of newly added instructions which are ready to be added
- // to the schedule. Initialize worklist with those that have zero operands.
- std::queue<const HloInstruction*> worklist;
- for (const HloInstruction* instruction :
- new_zero_operand_instructions.at(computation)) {
- worklist.push(instruction);
- }
-
- // Lambda which schedules all instructions on the worklist.
- auto schedule_worklist = [&]() {
- while (!worklist.empty()) {
- const HloInstruction* instruction = worklist.front();
- worklist.pop();
- sequence->at(computation).push_back(instruction);
- std::vector<const HloInstruction*>* new_users =
- tensorflow::gtl::FindOrNull(new_instruction_uses, instruction);
- if (new_users != nullptr) {
- // This just-scheduled instruction has users which are newly added to
- // the module. Update the number of unscheduled operands and push the
- // newly added instruction to the worklist if it is ready to
- // schedule.
- for (const HloInstruction* new_user : *new_users) {
- unscheduled_operand_count.at(new_user)--;
- CHECK_GE(unscheduled_operand_count.at(new_user), 0);
- if (unscheduled_operand_count.at(new_user) == 0) {
- worklist.push(new_user);
- }
- }
- }
- }
- };
-
- schedule_worklist();
- for (int id : id_sequence.at(computation)) {
- auto it = id_to_instruction.find(id);
- if (it == id_to_instruction.end()) {
- // This instruction in the schedule is no longer in the module.
- continue;
- }
- const HloInstruction* instruction = it->second;
- worklist.push(instruction);
- schedule_worklist();
- }
- }
-
- TF_RETURN_IF_ERROR(VerifySchedule(module, *sequence));
- return Status::OK();
-}
-
-Status VerifySchedule(
- const HloModule& module,
- const SequentialHloOrdering::HloModuleSequence& sequence) {
- VLOG(2) << "VerifySchedule()";
- XLA_VLOG_LINES(2, module.ToString());
- VLOG(2) << sequence;
-
- // Verify the set of computations in the sequence is exactly the set of
- // computations in the module.
- std::vector<HloComputation*> nonfusion_computations =
- module.MakeNonfusionComputations();
- TF_RET_CHECK(nonfusion_computations.size() == sequence.size());
- tensorflow::gtl::FlatSet<const HloComputation*> computations_in_module(
- module.computations().begin(), module.computations().end());
- for (const auto& computation_sequence : sequence) {
- TF_RET_CHECK(computations_in_module.count(computation_sequence.first) == 1);
- }
-
- // For each computation verify the set of instructions is the same and that
- // each dependency and control edge is honored.
- for (const HloComputation* computation : nonfusion_computations) {
- tensorflow::gtl::FlatMap<const HloInstruction*, int> instruction_position;
- int pos = 0;
- for (const HloInstruction* instruction : sequence.at(computation)) {
- TF_RET_CHECK(instruction_position.insert({instruction, pos}).second)
- << "Instruction " << instruction->name()
- << " appears more than once in the schedule";
- pos++;
- }
-
- TF_RET_CHECK(instruction_position.size() ==
- computation->instruction_count());
- for (const HloInstruction* instruction : computation->instructions()) {
- TF_RET_CHECK(instruction_position.count(instruction) == 1)
- << "Instruction " << instruction->name() << " is not in schedule";
- }
-
- for (const HloInstruction* instruction : computation->instructions()) {
- for (const HloInstruction* operand : instruction->operands()) {
- TF_RET_CHECK(instruction_position.at(operand) <
- instruction_position.at(instruction))
- << "Instruction " << instruction->name()
- << " is not scheduled after its operand " << operand->name();
- }
-
- for (const HloInstruction* pred : instruction->control_predecessors()) {
- TF_RET_CHECK(instruction_position.at(pred) <
- instruction_position.at(instruction))
- << "Instruction " << instruction->name()
- << " is not scheduled after its control predecessor "
- << pred->name();
- }
- }
- }
-
- return Status::OK();
-}
-
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/hlo_scheduling.h b/tensorflow/compiler/xla/service/hlo_scheduling.h
index d06b8d9a5c..54e32340ba 100644
--- a/tensorflow/compiler/xla/service/hlo_scheduling.h
+++ b/tensorflow/compiler/xla/service/hlo_scheduling.h
@@ -21,6 +21,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_module.h"
#include "tensorflow/compiler/xla/service/hlo_ordering.h"
+#include "tensorflow/compiler/xla/service/hlo_schedule.h"
#include "tensorflow/compiler/xla/service/logical_buffer.h"
#include "tensorflow/compiler/xla/service/tuple_points_to_analysis.h"
#include "tensorflow/compiler/xla/statusor.h"
@@ -32,14 +33,14 @@ namespace xla {
// 'computation' that minimizes peak memory, given a points-to analysis result
// that describes buffer aliasing, together with a target-specific size function
// that maps a tensor's logical size to its padded size.
-typedef std::function<StatusOr<std::vector<const HloInstruction*>>(
+typedef std::function<StatusOr<HloInstructionSequence>(
const HloComputation&, const TuplePointsToAnalysis&,
const LogicalBuffer::SizeFunction&,
const tensorflow::gtl::FlatMap<const HloComputation*, int64>&)>
MemorySchedulerAlgorithm;
// List scheduler
-StatusOr<std::vector<const HloInstruction*>> ListMemoryScheduler(
+StatusOr<HloInstructionSequence> ListMemoryScheduler(
const HloComputation& computation,
const TuplePointsToAnalysis& points_to_analysis,
const LogicalBuffer::SizeFunction& size_function,
@@ -47,7 +48,7 @@ StatusOr<std::vector<const HloInstruction*>> ListMemoryScheduler(
memory_by_computation);
// DFS-order scheduler
-StatusOr<std::vector<const HloInstruction*>> DFSMemoryScheduler(
+StatusOr<HloInstructionSequence> DFSMemoryScheduler(
const HloComputation& computation,
const TuplePointsToAnalysis& points_to_analysis,
const LogicalBuffer::SizeFunction& size_function,
@@ -55,7 +56,7 @@ StatusOr<std::vector<const HloInstruction*>> DFSMemoryScheduler(
memory_by_computation);
// Naive Post Order scheduler
-StatusOr<std::vector<const HloInstruction*>> PostOrderMemoryScheduler(
+StatusOr<HloInstructionSequence> PostOrderMemoryScheduler(
const HloComputation& computation,
const TuplePointsToAnalysis& points_to_analysis,
const LogicalBuffer::SizeFunction& size_function,
@@ -65,63 +66,26 @@ StatusOr<std::vector<const HloInstruction*>> PostOrderMemoryScheduler(
// The default scheduling algorithm. Runs both the list scheduler
// and the DFS scheduler, and chooses whichever returns a lower min-memory,
// not accounting for fragmentation.
-StatusOr<std::vector<const HloInstruction*>> DefaultMemoryScheduler(
+StatusOr<HloInstructionSequence> DefaultMemoryScheduler(
const HloComputation& computation,
const TuplePointsToAnalysis& points_to_analysis,
const LogicalBuffer::SizeFunction& size_function,
const tensorflow::gtl::FlatMap<const HloComputation*, int64>&
memory_by_computation);
-// Returns an HloModuleSequence which seeks to minimize the memory required for
+// Returns an HloSchedule which seeks to minimize the memory required for
// the computation. size_function is the function returning the number of bytes
// required for a LogicalBuffer.
-StatusOr<SequentialHloOrdering::HloModuleSequence> ScheduleComputationsInModule(
+StatusOr<HloSchedule> ScheduleModule(
const HloModule& module, const LogicalBuffer::SizeFunction& size_function,
const MemorySchedulerAlgorithm& algorithm = {});
// Computes the schedule for a single computation.
// Currently only used by the GPU backend.
-StatusOr<std::vector<const HloInstruction*>> ScheduleOneComputation(
+StatusOr<HloInstructionSequence> ScheduleComputation(
const HloComputation& computation,
const LogicalBuffer::SizeFunction& size_function);
-// Transforms the given schedule such that it is (again) a valid schedule for
-// the module. This is used to update a schedule after the HLO module has been
-// transformed in some way. In general, the only transformations to the module
-// for which a schedule can be updated is the addition or removal of
-// instructions to/from the module. Updating the schedule after new dependencies
-// between existing instructions in the module is not supported and may result
-// in an error status returned.
-//
-// Instructions in the module which also exist in the given schedule will remain
-// in the same order in the updated schedule. Instructions which exist in the
-// module but not in the given schedule will be placed as early as possible in
-// the updated schedule.
-//
-// 'id_sequence' is a mirror of the given schedule 'sequence' but with
-// HloInstruction ids rather than HloInstruction pointers. This should be
-// constructed using ComputeIdSchedule below after the schedule is constructed
-// but before the HLO module is transformed.
-Status UpdateSchedule(
- const HloModule& module,
- const tensorflow::gtl::FlatMap<const HloComputation*, std::vector<int>>&
- id_sequence,
- SequentialHloOrdering::HloModuleSequence* sequence);
-
-// Constructs a copy of the given schedule but with HloInstruction unique ids
-// rather than HloInstruction pointers. This is necessary for updating a
-// schedule as HloInstruction points in the schedule may become invalid if
-// instructions are removed from the module. Used by UpdateSchedule above..
-// TODO(b/113175018): Remove this function when HLO schedule is its own class.
-tensorflow::gtl::FlatMap<const HloComputation*, std::vector<int>>
-ComputeIdSchedule(const SequentialHloOrdering::HloModuleSequence& sequence);
-
-// Verifies that the given schedule is valid for the given module. Specifically,
-// the schedule contains exactly the instructions in the module and every
-// dependency in the module is satisfied in the schedule.
-Status VerifySchedule(const HloModule& module,
- const SequentialHloOrdering::HloModuleSequence& sequence);
-
} // namespace xla
#endif // TENSORFLOW_COMPILER_XLA_SERVICE_HLO_SCHEDULING_H_
diff --git a/tensorflow/compiler/xla/service/hlo_scheduling_test.cc b/tensorflow/compiler/xla/service/hlo_scheduling_test.cc
index d49d09d459..6afe51997e 100644
--- a/tensorflow/compiler/xla/service/hlo_scheduling_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_scheduling_test.cc
@@ -18,6 +18,7 @@ limitations under the License.
#include <memory>
#include <string>
+#include "absl/algorithm/container.h"
#include "tensorflow/compiler/xla/service/heap_simulator.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_dce.h"
@@ -67,19 +68,20 @@ TEST_F(HloSchedulingTest, LastUseScheduledFirst) {
module->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(
- SequentialHloOrdering::HloModuleSequence sequence,
- ScheduleComputationsInModule(*module, [](const BufferValue& buffer) {
+ HloSchedule schedule,
+ ScheduleModule(*module, [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape());
}));
// Verify that all instructions are in the sequence.
- EXPECT_EQ(module->entry_computation()->instruction_count(),
- sequence.at(module->entry_computation()).size());
+ const std::vector<const HloInstruction*>& sequence =
+ schedule.sequence(module->entry_computation()).instructions();
+ EXPECT_EQ(module->entry_computation()->instruction_count(), sequence.size());
// The first instruction should be the parameter and the last the root "sub".
- EXPECT_EQ(param, sequence.at(module->entry_computation()).front());
- EXPECT_EQ(sub, sequence.at(module->entry_computation()).back());
+ EXPECT_EQ(param, sequence.front());
+ EXPECT_EQ(sub, sequence.back());
- SequentialHloOrdering ordering(module.get(), sequence);
+ SequentialHloOrdering ordering(schedule);
EXPECT_TRUE(ordering.ExecutesBefore(add, negate));
}
@@ -108,28 +110,26 @@ ENTRY root {
return ShapeUtil::ByteSizeOf(buffer.shape(), /*pointer_size=*/8);
};
TF_ASSERT_OK_AND_ASSIGN(
- SequentialHloOrdering::HloModuleSequence sequence,
- ScheduleComputationsInModule(*module, size_fn, ListMemoryScheduler));
+ HloSchedule schedule,
+ ScheduleModule(*module, size_fn, ListMemoryScheduler));
// Verify that all instructions are in the sequence.
- EXPECT_EQ(module->entry_computation()->instruction_count(),
- sequence.at(module->entry_computation()).size());
+ const std::vector<const HloInstruction*>& sequence =
+ schedule.sequence(module->entry_computation()).instructions();
+ EXPECT_EQ(module->entry_computation()->instruction_count(), sequence.size());
std::unordered_map<string, const HloInstruction*> instructions_by_name;
- for (const HloInstruction* instruction :
- sequence.at(module->entry_computation())) {
+ for (const HloInstruction* instruction : sequence) {
instructions_by_name[instruction->name()] = instruction;
}
// The first instruction should be the parameter and the last the root.
- EXPECT_EQ(instructions_by_name.at("param"),
- sequence.at(module->entry_computation()).front());
- EXPECT_EQ(instructions_by_name.at("result"),
- sequence.at(module->entry_computation()).back());
+ EXPECT_EQ(instructions_by_name.at("param"), sequence.front());
+ EXPECT_EQ(instructions_by_name.at("result"), sequence.back());
// Instructions "d" and "e" will both be schedulable at the same time, but
// instruction "d" allows us to free the buffer of "p1", so the list scheduler
// should prefer it.
- SequentialHloOrdering ordering(module.get(), sequence);
+ SequentialHloOrdering ordering(schedule);
EXPECT_TRUE(ordering.ExecutesBefore(instructions_by_name.at("d"),
instructions_by_name.at("e")));
}
@@ -220,13 +220,13 @@ TEST_F(HloSchedulingTest, ListAccountsForSubcomputations) {
return ShapeUtil::ByteSizeOf(buffer.shape());
};
TF_ASSERT_OK_AND_ASSIGN(
- SequentialHloOrdering::HloModuleSequence sequence,
- ScheduleComputationsInModule(*module, size_fn, ListMemoryScheduler));
+ HloSchedule schedule,
+ ScheduleModule(*module, size_fn, ListMemoryScheduler));
// Verify that all instructions are in the sequence.
auto entry_computation = module->entry_computation();
EXPECT_EQ(entry_computation->instruction_count(),
- sequence.at(entry_computation).size());
- SequentialHloOrdering ordering(module.get(), sequence);
+ schedule.sequence(entry_computation).size());
+ SequentialHloOrdering ordering(schedule);
// This schedule is an example of List's greedy heuristics being suboptimal.
// The while_loop is more expensive than transpose, so it would have been
// better to schedule it first, instead of during the busy time.
@@ -243,13 +243,13 @@ TEST_F(HloSchedulingTest, ListAccountsForSubcomputations) {
// HeapSimulator doesn't account for subcomputations
EXPECT_EQ(80, HeapSimulator::MinimumMemoryForComputation(
- *entry_computation, sequence.at(entry_computation),
+ *entry_computation, schedule.sequence(entry_computation),
*points_to_analysis, size_fn)
.ValueOrDie());
// HeapSimulator accounts for subcomputations. The output buffer is aliased,
// so we don't double count.
EXPECT_EQ(64, HeapSimulator::MinimumMemoryForComputation(
- *entry_computation, sequence.at(entry_computation),
+ *entry_computation, schedule.sequence(entry_computation),
*points_to_analysis, size_fn, &memory_by_computation)
.ValueOrDie());
}
@@ -281,19 +281,18 @@ TEST_F(HloSchedulingTest, TuplesAreAccountedCorrectly) {
auto module = CreateNewModule();
module->AddEntryComputation(builder.Build());
- TF_ASSERT_OK_AND_ASSIGN(
- SequentialHloOrdering::HloModuleSequence sequence,
- ScheduleComputationsInModule(*module,
- [](const BufferValue& buffer) {
- return ShapeUtil::ByteSizeOf(
- buffer.shape(), TUPLE_SIZE);
- },
- ListMemoryScheduler));
+ TF_ASSERT_OK_AND_ASSIGN(HloSchedule schedule,
+ ScheduleModule(*module,
+ [](const BufferValue& buffer) {
+ return ShapeUtil::ByteSizeOf(
+ buffer.shape(), TUPLE_SIZE);
+ },
+ ListMemoryScheduler));
// Verify that all instructions are in the sequence.
EXPECT_EQ(module->entry_computation()->instruction_count(),
- sequence.at(module->entry_computation()).size());
- SequentialHloOrdering ordering(module.get(), sequence);
+ schedule.sequence(module->entry_computation()).size());
+ SequentialHloOrdering ordering(schedule);
// tuple allocates the tuple buffer and doesn't free anything.
// abs_abs2 uses the same buffer for input/output, so its bytes-freed is 0.
// abs_abs2 should be scheduled before tuple by List.
@@ -332,18 +331,18 @@ TEST_F(HloSchedulingTest, MultiOutputFusionAccountedCorrectly) {
auto fusion = computation->CreateFusionInstruction(
{tuple, mul, add}, HloInstruction::FusionKind::kLoop);
- TF_ASSERT_OK_AND_ASSIGN(SequentialHloOrdering::HloModuleSequence sequence,
- ScheduleComputationsInModule(
- *module,
- [](const BufferValue& buffer) {
- return ShapeUtil::ByteSizeOf(buffer.shape(), 2);
- },
- ListMemoryScheduler));
+ TF_ASSERT_OK_AND_ASSIGN(HloSchedule schedule,
+ ScheduleModule(*module,
+ [](const BufferValue& buffer) {
+ return ShapeUtil::ByteSizeOf(
+ buffer.shape(), 2);
+ },
+ ListMemoryScheduler));
// Verify that all instructions are in the sequence.
EXPECT_EQ(module->entry_computation()->instruction_count(),
- sequence.at(module->entry_computation()).size());
- SequentialHloOrdering ordering(module.get(), sequence);
+ schedule.sequence(module->entry_computation()).size());
+ SequentialHloOrdering ordering(schedule);
// fusion allocates memory for the tuple elements and doesn't free anything,
// so it's more expensive than exp.
EXPECT_TRUE(ordering.ExecutesBefore(exp, fusion));
@@ -391,12 +390,12 @@ TEST_F(HloSchedulingTest, HeapSimulatorAccountsForSubcomputations) {
return ShapeUtil::ByteSizeOf(buffer.shape());
};
TF_ASSERT_OK_AND_ASSIGN(
- SequentialHloOrdering::HloModuleSequence sequence,
- ScheduleComputationsInModule(*module, size_fn, ListMemoryScheduler));
+ HloSchedule schedule,
+ ScheduleModule(*module, size_fn, ListMemoryScheduler));
// Verify that all instructions are in the sequence.
auto entry_computation = module->entry_computation();
- EXPECT_EQ(entry_computation->instruction_count(),
- sequence.at(entry_computation).size());
+ EXPECT_EQ(module->entry_computation()->instruction_count(),
+ schedule.sequence(module->entry_computation()).size());
tensorflow::gtl::FlatMap<const HloComputation*, int64> memory_by_computation;
memory_by_computation[cond_computation] = 17;
@@ -406,262 +405,16 @@ TEST_F(HloSchedulingTest, HeapSimulatorAccountsForSubcomputations) {
// HeapSimulator doesn't account for subcomputations
EXPECT_EQ(16, HeapSimulator::MinimumMemoryForComputation(
- *entry_computation, sequence.at(entry_computation),
+ *entry_computation, schedule.sequence(entry_computation),
*points_to_analysis, size_fn)
.ValueOrDie());
// HeapSimulator accounts for subcomputations. Cond is the largest one.
// The output buffer of the while is aliased.
EXPECT_EQ(17, HeapSimulator::MinimumMemoryForComputation(
- *entry_computation, sequence.at(entry_computation),
+ *entry_computation, schedule.sequence(entry_computation),
*points_to_analysis, size_fn, &memory_by_computation)
.ValueOrDie());
}
-TEST_F(HloSchedulingTest, UpdateScheduleUnchangedModule) {
- // Updating the schedule of an unchanged HLO module should not affect the
- // schedule at all.
- const string module_str = R"(
-HloModule UpdateScheduleUnchanged
-
-ENTRY main {
- a = f32[] parameter(0)
- b = f32[] parameter(1)
- c = f32[] constant(42.0)
- sum = f32[] add(a, b)
- neg = f32[] negate(c)
- ROOT root = f32[] multiply(sum, neg)
-}
-)";
- TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
- ParseHloString(module_str));
- TF_ASSERT_OK_AND_ASSIGN(
- SequentialHloOrdering::HloModuleSequence sequence,
- ScheduleComputationsInModule(*module, [](const BufferValue& buffer) {
- return ShapeUtil::ByteSizeOf(buffer.shape());
- }));
- tensorflow::gtl::FlatMap<const HloComputation*, std::vector<int>>
- id_sequence = ComputeIdSchedule(sequence);
- std::vector<const HloInstruction*> entry_schedule = sequence.begin()->second;
-
- EXPECT_EQ(entry_schedule.size(), 6);
-
- TF_ASSERT_OK(UpdateSchedule(*module, id_sequence, &sequence));
- TF_ASSERT_OK(VerifySchedule(*module, sequence));
-
- EXPECT_EQ(entry_schedule, sequence.begin()->second);
-}
-
-TEST_F(HloSchedulingTest, UpdateScheduleWithNewInstructions) {
- // Add some additional instructions to a module and verify the schedule can be
- // updated.
- const string module_str = R"(
-HloModule UpdateScheduleWithNewInstructions
-
-ENTRY main {
- a = f32[] parameter(0)
- b = f32[] parameter(1)
- c = f32[] constant(42.0)
- sum = f32[] add(a, b)
- neg = f32[] negate(c)
- ROOT root = f32[] multiply(sum, neg)
-}
-)";
- TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
- ParseHloString(module_str));
- TF_ASSERT_OK_AND_ASSIGN(
- SequentialHloOrdering::HloModuleSequence sequence,
- ScheduleComputationsInModule(*module, [](const BufferValue& buffer) {
- return ShapeUtil::ByteSizeOf(buffer.shape());
- }));
- tensorflow::gtl::FlatMap<const HloComputation*, std::vector<int>>
- id_sequence = ComputeIdSchedule(sequence);
-
- HloComputation* entry = module->entry_computation();
- const Shape shape = entry->root_instruction()->shape();
- HloInstruction* constant = entry->AddInstruction(
- HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
- HloInstruction* sub = entry->AddInstruction(HloInstruction::CreateBinary(
- shape, HloOpcode::kSubtract, constant, entry->root_instruction()));
- entry->set_root_instruction(sub);
-
- auto in_schedule = [&](const HloInstruction* hlo) {
- return std::find(sequence.at(entry).begin(), sequence.at(entry).end(),
- hlo) != sequence.at(entry).end();
- };
-
- EXPECT_EQ(sequence.at(entry).size(), 6);
- EXPECT_FALSE(in_schedule(constant));
- EXPECT_FALSE(in_schedule(sub));
-
- TF_ASSERT_OK(UpdateSchedule(*module, id_sequence, &sequence));
- TF_ASSERT_OK(VerifySchedule(*module, sequence));
-
- EXPECT_EQ(sequence.at(entry).size(), 8);
- EXPECT_TRUE(in_schedule(constant));
- EXPECT_TRUE(in_schedule(sub));
-}
-
-TEST_F(HloSchedulingTest, UpdateScheduleWithAddedAndDeletedInstruction) {
- // Add and delete some instructions from a module and verify that the schedule
- // can be updated successfully.
- const string module_str = R"(
-HloModule UpdateScheduleWithAddedAndDeletedInstruction
-
-ENTRY main {
- a = f32[] parameter(0)
- b = f32[] parameter(1)
- c = f32[] constant(42.0)
- sum = f32[] add(a, b)
- neg = f32[] negate(c)
- ROOT root = f32[] multiply(sum, neg)
-}
-)";
-
- TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
- ParseHloString(module_str));
- TF_ASSERT_OK_AND_ASSIGN(
- SequentialHloOrdering::HloModuleSequence sequence,
- ScheduleComputationsInModule(*module, [](const BufferValue& buffer) {
- return ShapeUtil::ByteSizeOf(buffer.shape());
- }));
- tensorflow::gtl::FlatMap<const HloComputation*, std::vector<int>>
- id_sequence = ComputeIdSchedule(sequence);
-
- // Set the entry root to some expression containing just a parameter and a
- // constant.
- HloComputation* entry = module->entry_computation();
- HloInstruction* constant = entry->AddInstruction(
- HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
- HloInstruction* new_root = entry->AddInstruction(
- HloInstruction::CreateBinary(constant->shape(), HloOpcode::kSubtract,
- constant, entry->parameter_instruction(0)));
- entry->set_root_instruction(new_root);
-
- // DCE should remove everything but the parameters and the newly added code.
- HloDCE dce;
- TF_ASSERT_OK(dce.Run(module.get()).status());
-
- EXPECT_EQ(sequence.at(entry).size(), 6);
-
- TF_ASSERT_OK(UpdateSchedule(*module, id_sequence, &sequence));
- TF_ASSERT_OK(VerifySchedule(*module, sequence));
-
- EXPECT_EQ(sequence.at(entry).size(), 4);
-}
-
-TEST_F(HloSchedulingTest, UpdateScheduleWithCompletelyReplacedModule) {
- // Completely replace a module with an entirely new set of instructions and
- // verify that the schedule can be updated successfully.
- const string module_str = R"(
-HloModule UpdateScheduleWithCompletelyReplacedModule
-
-ENTRY main {
- a = f32[] constant(42.0)
- b = f32[] constant(123.0)
- ROOT sum = f32[] add(a, b)
-}
-)";
-
- TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
- ParseHloString(module_str));
- TF_ASSERT_OK_AND_ASSIGN(
- SequentialHloOrdering::HloModuleSequence sequence,
- ScheduleComputationsInModule(*module, [](const BufferValue& buffer) {
- return ShapeUtil::ByteSizeOf(buffer.shape());
- }));
- tensorflow::gtl::FlatMap<const HloComputation*, std::vector<int>>
- id_sequence = ComputeIdSchedule(sequence);
-
- // Replace the entry computation with the negation of a constant.
- HloComputation* entry = module->entry_computation();
- HloInstruction* constant = entry->AddInstruction(
- HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
- HloInstruction* new_root = entry->AddInstruction(HloInstruction::CreateUnary(
- constant->shape(), HloOpcode::kNegate, constant));
- entry->set_root_instruction(new_root);
-
- // DCE the old instructions.
- HloDCE dce;
- TF_ASSERT_OK(dce.Run(module.get()).status());
-
- EXPECT_EQ(sequence.at(entry).size(), 3);
-
- TF_ASSERT_OK(UpdateSchedule(*module, id_sequence, &sequence));
- TF_ASSERT_OK(VerifySchedule(*module, sequence));
-
- EXPECT_EQ(sequence.at(entry).size(), 2);
-}
-
-TEST_F(HloSchedulingTest, UpdateScheduleWithMultipleComputations) {
- // Create changes to more than one computation in an HLO module and verify
- // that the schedule can be updated.
- const string module_str = R"(
-HloModule UpdateScheduleWithMultipleComputations
-
-%Body (param.1: (s32[], token[])) -> (s32[], token[]) {
- %param.1 = (s32[], token[]) parameter(0)
- %get-tuple-element.1 = s32[] get-tuple-element((s32[], token[]) %param.1), index=0
- %constant.1 = s32[] constant(1)
- %add = s32[] add(s32[] %get-tuple-element.1, s32[] %constant.1)
- %get-tuple-element.2 = token[] get-tuple-element((s32[], token[]) %param.1), index=1
- %after-all = token[] after-all(token[] %get-tuple-element.2)
- ROOT %tuple = (s32[], token[]) tuple(s32[] %add, token[] %after-all)
-}
-
-%Cond (param: (s32[], token[])) -> pred[] {
- %param = (s32[], token[]) parameter(0)
- %get-tuple-element = s32[] get-tuple-element((s32[], token[]) %param), index=0
- %constant = s32[] constant(42)
- ROOT %less-than = pred[] less-than(s32[] %get-tuple-element, s32[] %constant)
-}
-
-ENTRY %WhileLoop () -> s32[] {
- %zero = s32[] constant(0)
- %init_token = token[] after-all()
- %init_tuple = (s32[], token[]) tuple(s32[] %zero, token[] %init_token)
- %while = (s32[], token[]) while((s32[], token[]) %init_tuple), condition=%Cond, body=%Body
- ROOT %root = s32[] get-tuple-element((s32[], token[]) %while), index=0
-}
-)";
-
- TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
- ParseHloString(module_str));
- TF_ASSERT_OK_AND_ASSIGN(
- SequentialHloOrdering::HloModuleSequence sequence,
- ScheduleComputationsInModule(*module, [](const BufferValue& buffer) {
- return ShapeUtil::ByteSizeOf(buffer.shape(),
- /*pointer_size=*/sizeof(void*));
- }));
- tensorflow::gtl::FlatMap<const HloComputation*, std::vector<int>>
- id_sequence = ComputeIdSchedule(sequence);
-
- const HloInstruction* xla_while =
- module->entry_computation()->root_instruction()->operand(0);
- HloComputation* body = xla_while->while_body();
- HloComputation* cond = xla_while->while_condition();
-
- // Negate the root of the cond.
- cond->set_root_instruction(cond->AddInstruction(
- HloInstruction::CreateUnary(ShapeUtil::MakeShape(PRED, {}),
- HloOpcode::kNot, cond->root_instruction())));
-
- // Replace the body with a computation which just passes through its
- // parameter.
- body->set_root_instruction(body->parameter_instruction(0));
-
- // DCE the dead code in the body.
- HloDCE dce;
- TF_ASSERT_OK(dce.Run(module.get()).status());
-
- EXPECT_EQ(sequence.at(body).size(), 7);
- EXPECT_EQ(sequence.at(cond).size(), 4);
-
- TF_ASSERT_OK(UpdateSchedule(*module, id_sequence, &sequence));
- TF_ASSERT_OK(VerifySchedule(*module, sequence));
-
- EXPECT_EQ(sequence.at(body).size(), 1);
- EXPECT_EQ(sequence.at(cond).size(), 5);
-}
-
} // namespace
} // namespace xla
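The test hunks above migrate from the retired SequentialHloOrdering::HloModuleSequence / ScheduleComputationsInModule pair to the HloSchedule / ScheduleModule API. A minimal sketch of the new calling pattern, assembled from the replacement lines (size_fn stands in for any BufferValue-sizing callback and is not a fixed name in the API):

  TF_ASSERT_OK_AND_ASSIGN(
      HloSchedule schedule,
      ScheduleModule(*module, size_fn, ListMemoryScheduler));
  // The per-computation order now comes from the schedule itself...
  EXPECT_EQ(module->entry_computation()->instruction_count(),
            schedule.sequence(module->entry_computation()).size());
  // ...and SequentialHloOrdering is constructed from the schedule alone.
  SequentialHloOrdering ordering(schedule);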
diff --git a/tensorflow/compiler/xla/service/indexed_array_analysis.cc b/tensorflow/compiler/xla/service/indexed_array_analysis.cc
index 4a71ee909b..37b774b8a5 100644
--- a/tensorflow/compiler/xla/service/indexed_array_analysis.cc
+++ b/tensorflow/compiler/xla/service/indexed_array_analysis.cc
@@ -1031,8 +1031,8 @@ bool CanFoldDotIntoIndexedArray(
StatusOr<Analysis::Array*>
IndexedArrayAnalysis::ComputeArrayForDotWithIndexedLhs(
const Shape& shape, const DotDimensionNumbers& dim_numbers,
- const PrecisionConfigProto& precision_config,
- ScalarIndexedConstantArray* lhs, ConstantArray* rhs) {
+ const PrecisionConfig& precision_config, ScalarIndexedConstantArray* lhs,
+ ConstantArray* rhs) {
VLOG(3) << "ComputeArrayForDotWithIndexedLhs(" << ToString(lhs) << " "
<< ToString(rhs);
if (!CanFoldDotIntoIndexedArray(
@@ -1066,7 +1066,7 @@ IndexedArrayAnalysis::ComputeArrayForDotWithIndexedLhs(
StatusOr<Analysis::Array*>
IndexedArrayAnalysis::ComputeArrayForDotWithIndexedRhs(
const Shape& shape, const DotDimensionNumbers& dim_numbers,
- const PrecisionConfigProto& precision_config, ConstantArray* lhs,
+ const PrecisionConfig& precision_config, ConstantArray* lhs,
ScalarIndexedConstantArray* rhs) {
VLOG(3) << "ComputeArrayForDotWithIndexedRhs(" << ToString(lhs) << " "
<< ToString(rhs);
@@ -1101,7 +1101,7 @@ IndexedArrayAnalysis::ComputeArrayForDotWithIndexedRhs(
StatusOr<Analysis::Array*> IndexedArrayAnalysis::ComputeArrayForDot(
const Shape& shape, const DotDimensionNumbers& dim_numbers,
- const PrecisionConfigProto& precision_config, Array* lhs, Array* rhs) {
+ const PrecisionConfig& precision_config, Array* lhs, Array* rhs) {
// Intuitively, if
//
// - The LHS of a dot product is a gathered sequence of rows from a constant
diff --git a/tensorflow/compiler/xla/service/indexed_array_analysis.h b/tensorflow/compiler/xla/service/indexed_array_analysis.h
index f21e784a4d..9746d176cc 100644
--- a/tensorflow/compiler/xla/service/indexed_array_analysis.h
+++ b/tensorflow/compiler/xla/service/indexed_array_analysis.h
@@ -267,17 +267,18 @@ class IndexedArrayAnalysis {
StatusOr<Array*> ComputeArrayForDotWithIndexedLhs(
const Shape& shape, const DotDimensionNumbers& dim_numbers,
- const PrecisionConfigProto& precision_config,
- ScalarIndexedConstantArray* lhs, ConstantArray* rhs);
+ const PrecisionConfig& precision_config, ScalarIndexedConstantArray* lhs,
+ ConstantArray* rhs);
StatusOr<Array*> ComputeArrayForDotWithIndexedRhs(
const Shape& shape, const DotDimensionNumbers& dim_numbers,
- const PrecisionConfigProto& precision_config, ConstantArray* lhs,
+ const PrecisionConfig& precision_config, ConstantArray* lhs,
ScalarIndexedConstantArray* rhs);
- StatusOr<Array*> ComputeArrayForDot(
- const Shape& shape, const DotDimensionNumbers& dim_numbers,
- const PrecisionConfigProto& precision_config, Array* lhs, Array* rhs);
+ StatusOr<Array*> ComputeArrayForDot(const Shape& shape,
+ const DotDimensionNumbers& dim_numbers,
+ const PrecisionConfig& precision_config,
+ Array* lhs, Array* rhs);
// This tries to fold a ScalarIndexedArray which has another
// ScalarIndexedArray as a source into a ScalarIndexedArray that instead has a
diff --git a/tensorflow/compiler/xla/service/transpose_folding_test.cc b/tensorflow/compiler/xla/service/transpose_folding_test.cc
index e486a00e53..79b5c09abb 100644
--- a/tensorflow/compiler/xla/service/transpose_folding_test.cc
+++ b/tensorflow/compiler/xla/service/transpose_folding_test.cc
@@ -215,13 +215,6 @@ ENTRY entry_computation {
/*lhs_contracting_dim=*/1, /*rhs_contracting_dim=*/1));
}
-PrecisionConfigProto DefaultPrecisionConfig(int operands) {
- PrecisionConfigProto precision_config;
- precision_config.mutable_operand_precision()->Resize(
- operands, PrecisionConfigProto::DEFAULT);
- return precision_config;
-}
-
// Test that a two dimension swap of the kernel gets folded into convolution.
TEST_F(TransposeFoldingTest, FoldConvDimSwapTransposeRhs) {
auto builder = HloComputation::Builder("entry_computation");
diff --git a/tensorflow/compiler/xla/service/tuple_points_to_analysis_test.cc b/tensorflow/compiler/xla/service/tuple_points_to_analysis_test.cc
index e3328203a6..2b2a2eb42a 100644
--- a/tensorflow/compiler/xla/service/tuple_points_to_analysis_test.cc
+++ b/tensorflow/compiler/xla/service/tuple_points_to_analysis_test.cc
@@ -1064,9 +1064,9 @@ TEST_F(CanShareOperandBufferWithUserTest, FusedDotAdd) {
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
- PrecisionConfigProto precision_config;
+ PrecisionConfig precision_config;
precision_config.mutable_operand_precision()->Resize(
- /*new_size=*/2, PrecisionConfigProto::DEFAULT);
+ /*new_size=*/2, PrecisionConfig::DEFAULT);
auto dot = builder.AddInstruction(
HloInstruction::CreateDot(data_shape, a, b, dot_dnums, precision_config));
diff --git a/tensorflow/compiler/xla/service/while_loop_constant_sinking.cc b/tensorflow/compiler/xla/service/while_loop_constant_sinking.cc
index aab1180662..56145822be 100644
--- a/tensorflow/compiler/xla/service/while_loop_constant_sinking.cc
+++ b/tensorflow/compiler/xla/service/while_loop_constant_sinking.cc
@@ -15,10 +15,10 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/while_loop_constant_sinking.h"
#include "absl/algorithm/container.h"
+#include "absl/container/inlined_vector.h"
#include "tensorflow/compiler/xla/service/while_util.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/gtl/flatmap.h"
-#include "tensorflow/core/lib/gtl/inlined_vector.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/tests/hlo_test_base.cc b/tensorflow/compiler/xla/tests/hlo_test_base.cc
index fc4c68246e..3df99aac7d 100644
--- a/tensorflow/compiler/xla/tests/hlo_test_base.cc
+++ b/tensorflow/compiler/xla/tests/hlo_test_base.cc
@@ -120,6 +120,14 @@ StatusOr<bool> HloTestBase::RunHloPass(HloPassInterface* hlo_pass,
return status_or;
}
+/* static */
+PrecisionConfig HloTestBase::DefaultPrecisionConfig(int operands) {
+ PrecisionConfig precision_config;
+ precision_config.mutable_operand_precision()->Resize(
+ operands, PrecisionConfig::DEFAULT);
+ return precision_config;
+}
+
DebugOptions HloTestBase::GetDebugOptionsForTest() {
auto debug_options = legacy_flags::GetDebugOptionsFromFlags();
// TODO(b/38354253): Change tests to use Parameters instead of Constants.
diff --git a/tensorflow/compiler/xla/tests/hlo_test_base.h b/tensorflow/compiler/xla/tests/hlo_test_base.h
index 4c88257bb2..21d77c0cc4 100644
--- a/tensorflow/compiler/xla/tests/hlo_test_base.h
+++ b/tensorflow/compiler/xla/tests/hlo_test_base.h
@@ -80,6 +80,8 @@ class HloTestBase : public ::testing::Test {
static StatusOr<bool> RunHloPass(HloPassInterface* hlo_pass,
HloModule* module);
+ static PrecisionConfig DefaultPrecisionConfig(int operands);
+
protected:
// This uses the interpreter backend as the reference backend and
// automatically finds another supported backend as the test backend. If the
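With DefaultPrecisionConfig promoted into HloTestBase, the per-file copies deleted in transpose_folding_test.cc above and multioutput_fusion_test.cc below become redundant. A sketch of the intended call site in a test deriving from HloTestBase, reusing the CreateDot signature exercised in tuple_points_to_analysis_test.cc earlier in this diff (builder, data_shape, a, b, and dot_dnums are that test's locals):

  auto dot = builder.AddInstruction(HloInstruction::CreateDot(
      data_shape, a, b, dot_dnums, DefaultPrecisionConfig(/*operands=*/2)));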
diff --git a/tensorflow/compiler/xla/tests/multioutput_fusion_test.cc b/tensorflow/compiler/xla/tests/multioutput_fusion_test.cc
index 53b5e933b6..c5e0b9b097 100644
--- a/tensorflow/compiler/xla/tests/multioutput_fusion_test.cc
+++ b/tensorflow/compiler/xla/tests/multioutput_fusion_test.cc
@@ -47,13 +47,6 @@ limitations under the License.
namespace xla {
namespace {
-PrecisionConfigProto DefaultPrecisionConfig(int operands) {
- PrecisionConfigProto precision_config;
- precision_config.mutable_operand_precision()->Resize(
- operands, PrecisionConfigProto::DEFAULT);
- return precision_config;
-}
-
class MultiOutputFusionTest : public HloTestBase {
protected:
MultiOutputFusionTest() { error_spec_ = ErrorSpec{0.0001, 1e-2}; }
diff --git a/tensorflow/compiler/xla/xla_data.proto b/tensorflow/compiler/xla/xla_data.proto
index 8e43f275e1..dd329f1181 100644
--- a/tensorflow/compiler/xla/xla_data.proto
+++ b/tensorflow/compiler/xla/xla_data.proto
@@ -580,7 +580,7 @@ message SourceTarget {
// Used to indicate the precision configuration. It has backend specific
// meaning.
-message PrecisionConfigProto {
+message PrecisionConfig {
enum Precision {
DEFAULT = 0;
HIGH = 1;
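The message rename here (PrecisionConfigProto to PrecisionConfig) is what drives the signature changes in indexed_array_analysis and the test updates earlier in this diff. For reference, a minimal sketch of populating the renamed message for a two-operand instruction, following the Resize() pattern used in the updated tests:

  PrecisionConfig precision_config;
  precision_config.mutable_operand_precision()->Resize(
      /*new_size=*/2, PrecisionConfig::DEFAULT);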
diff --git a/tensorflow/compiler/xrt/BUILD b/tensorflow/compiler/xrt/BUILD
index efbe980278..2ff97914f8 100644
--- a/tensorflow/compiler/xrt/BUILD
+++ b/tensorflow/compiler/xrt/BUILD
@@ -56,6 +56,7 @@ cc_library(
"//tensorflow/core:lib_internal",
"//tensorflow/stream_executor",
"@com_google_absl//absl/memory",
+ "@com_google_absl//absl/strings",
"@com_google_absl//absl/synchronization",
],
)
diff --git a/tensorflow/compiler/xrt/kernels/BUILD b/tensorflow/compiler/xrt/kernels/BUILD
index 68ba17a424..9e3d2454d1 100644
--- a/tensorflow/compiler/xrt/kernels/BUILD
+++ b/tensorflow/compiler/xrt/kernels/BUILD
@@ -46,19 +46,15 @@ cc_library(
deps = [
":xrt_state_ops",
"//tensorflow/compiler/tf2xla:xla_compiler",
- "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/compiler/xla/client:client_library",
- "//tensorflow/compiler/xla/client:compile_only_client",
"//tensorflow/compiler/xla/client:local_client",
"//tensorflow/compiler/xla/client:xla_computation",
- "//tensorflow/compiler/xla/legacy_flags:debug_options_flags",
"//tensorflow/compiler/xla/service:compiler",
"//tensorflow/compiler/xla/service:computation_placer",
- "//tensorflow/compiler/xla/service:hlo_proto",
"//tensorflow/compiler/xrt:xrt_proto",
"//tensorflow/compiler/xrt:xrt_utils",
"//tensorflow/core:core_cpu_internal",
@@ -67,6 +63,7 @@ cc_library(
"//tensorflow/core:lib_internal",
"//tensorflow/core:protos_all_cc",
"//tensorflow/stream_executor:stream_executor_headers_lib",
+ "@com_google_absl//absl/strings",
],
alwayslink = 1,
)
diff --git a/tensorflow/compiler/xrt/kernels/xrt_compile_ops.cc b/tensorflow/compiler/xrt/kernels/xrt_compile_ops.cc
index 5cf2bc8861..1d4f8d97f2 100644
--- a/tensorflow/compiler/xrt/kernels/xrt_compile_ops.cc
+++ b/tensorflow/compiler/xrt/kernels/xrt_compile_ops.cc
@@ -22,6 +22,7 @@ limitations under the License.
#include <utility>
#include <vector>
+#include "absl/strings/str_cat.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/compiler/xla/client/client_library.h"
#include "tensorflow/compiler/xla/client/xla_computation.h"
@@ -40,7 +41,6 @@ limitations under the License.
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/proto_serialization.h"
-#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/fingerprint.h"
#include "tensorflow/core/platform/types.h"
@@ -70,7 +70,7 @@ Status CompilationCacheKey(const xrt::XLAComputation& computation,
string serialized;
TF_RET_CHECK(SerializeToStringDeterministic(computation, &serialized));
uint64 fingerprint = Fingerprint64(serialized);
- *key = strings::StrCat(fingerprint);
+ *key = absl::StrCat(fingerprint);
return Status::OK();
}
diff --git a/tensorflow/compiler/xrt/xrt_state.cc b/tensorflow/compiler/xrt/xrt_state.cc
index 911ac9a78b..2c3b07da58 100644
--- a/tensorflow/compiler/xrt/xrt_state.cc
+++ b/tensorflow/compiler/xrt/xrt_state.cc
@@ -24,6 +24,7 @@ limitations under the License.
#include <utility>
#include "absl/memory/memory.h"
+#include "absl/strings/str_cat.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/backend.h"
#include "tensorflow/compiler/xla/shape_util.h"
@@ -32,7 +33,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/lib/core/status.h"
-#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/stream_executor/stream_executor.h"
@@ -201,14 +201,14 @@ const se::DeviceMemoryBase& XRTTupleAllocation::root_allocation() {
/*static*/ Status XRTTupleAllocation::Lookup(ResourceMgr* rm, int64 key,
XRTTupleAllocation** allocation) {
- string key_string = strings::StrCat(key);
+ string key_string = absl::StrCat(key);
TF_RETURN_IF_ERROR(rm->Lookup(kTupleContainer, key_string, allocation));
return Status::OK();
}
/*static*/ Status XRTTupleAllocation::DeleteFromResourceManager(ResourceMgr* rm,
int64 key) {
- string key_string = strings::StrCat(key);
+ string key_string = absl::StrCat(key);
return rm->Delete<XRTTupleAllocation>(kTupleContainer, key_string);
}
@@ -410,7 +410,7 @@ typedef XRTBufferAllocation* XRTBufferAllocationPtr;
Status XRTTupleAllocation::Intern(ResourceMgr* rm, int64* key) {
*key = get_uid();
- string key_string = strings::StrCat(*key);
+ string key_string = absl::StrCat(*key);
return rm->Create(kTupleContainer, key_string, this);
}
diff --git a/tensorflow/contrib/__init__.py b/tensorflow/contrib/__init__.py
index 5f477a79a3..9478e42b46 100644
--- a/tensorflow/contrib/__init__.py
+++ b/tensorflow/contrib/__init__.py
@@ -21,6 +21,14 @@ from __future__ import print_function
import os
+from tensorflow.python.tools import component_api_helper
+component_api_helper.package_hook(
+ parent_package_str=(
+ "tensorflow.contrib"),
+ child_package_str=(
+ "tensorflow_estimator.contrib.estimator"))
+del component_api_helper
+
# Add projects here, they will show up under tf.contrib.
from tensorflow.contrib import autograph
from tensorflow.contrib import batching
diff --git a/tensorflow/contrib/bigtable/kernels/bigtable_kernels.cc b/tensorflow/contrib/bigtable/kernels/bigtable_kernels.cc
index a25a641cdb..6138d79126 100644
--- a/tensorflow/contrib/bigtable/kernels/bigtable_kernels.cc
+++ b/tensorflow/contrib/bigtable/kernels/bigtable_kernels.cc
@@ -172,6 +172,11 @@ class BigtableTableOp : public OpKernel {
REGISTER_KERNEL_BUILDER(Name("BigtableTable").Device(DEVICE_CPU),
BigtableTableOp);
+} // namespace
+
+namespace data {
+namespace {
+
class ToBigtableOp : public AsyncOpKernel {
public:
explicit ToBigtableOp(OpKernelConstruction* ctx)
@@ -354,5 +359,6 @@ REGISTER_KERNEL_BUILDER(Name("DatasetToBigtable").Device(DEVICE_CPU),
ToBigtableOp);
} // namespace
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/contrib/bigtable/kernels/bigtable_lib.h b/tensorflow/contrib/bigtable/kernels/bigtable_lib.h
index a2a5df1037..4652021fec 100644
--- a/tensorflow/contrib/bigtable/kernels/bigtable_lib.h
+++ b/tensorflow/contrib/bigtable/kernels/bigtable_lib.h
@@ -79,6 +79,8 @@ class BigtableTableResource : public ResourceBase {
::google::cloud::bigtable::noex::Table table_;
};
+namespace data {
+
// BigtableReaderDatasetIterator is an abstract class for iterators from
// datasets that are "readers" (source datasets, not transformation datasets)
// that read from Bigtable.
@@ -138,6 +140,8 @@ class BigtableReaderDatasetIterator : public DatasetIterator<Dataset> {
::google::cloud::bigtable::RowReader::iterator iterator_ GUARDED_BY(mu_);
};
+} // namespace data
+
} // namespace tensorflow
#endif // TENSORFLOW_CONTRIB_BIGTABLE_KERNELS_BIGTABLE_LIB_H_
diff --git a/tensorflow/contrib/bigtable/kernels/bigtable_lookup_dataset_op.cc b/tensorflow/contrib/bigtable/kernels/bigtable_lookup_dataset_op.cc
index bd32672aa9..11f530e82a 100644
--- a/tensorflow/contrib/bigtable/kernels/bigtable_lookup_dataset_op.cc
+++ b/tensorflow/contrib/bigtable/kernels/bigtable_lookup_dataset_op.cc
@@ -17,6 +17,7 @@ limitations under the License.
#include "tensorflow/core/framework/op_kernel.h"
namespace tensorflow {
+namespace data {
namespace {
class BigtableLookupDatasetOp : public UnaryDatasetOpKernel {
@@ -226,4 +227,5 @@ REGISTER_KERNEL_BUILDER(Name("BigtableLookupDataset").Device(DEVICE_CPU),
BigtableLookupDatasetOp);
} // namespace
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/contrib/bigtable/kernels/bigtable_prefix_key_dataset_op.cc b/tensorflow/contrib/bigtable/kernels/bigtable_prefix_key_dataset_op.cc
index a803fdcb49..5cab729d9c 100644
--- a/tensorflow/contrib/bigtable/kernels/bigtable_prefix_key_dataset_op.cc
+++ b/tensorflow/contrib/bigtable/kernels/bigtable_prefix_key_dataset_op.cc
@@ -17,6 +17,7 @@ limitations under the License.
#include "tensorflow/core/framework/op_kernel.h"
namespace tensorflow {
+namespace data {
namespace {
class BigtablePrefixKeyDatasetOp : public DatasetOpKernel {
@@ -111,4 +112,5 @@ REGISTER_KERNEL_BUILDER(Name("BigtablePrefixKeyDataset").Device(DEVICE_CPU),
BigtablePrefixKeyDatasetOp);
} // namespace
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/contrib/bigtable/kernels/bigtable_range_key_dataset_op.cc b/tensorflow/contrib/bigtable/kernels/bigtable_range_key_dataset_op.cc
index 5cd0371c79..4dc4647bd2 100644
--- a/tensorflow/contrib/bigtable/kernels/bigtable_range_key_dataset_op.cc
+++ b/tensorflow/contrib/bigtable/kernels/bigtable_range_key_dataset_op.cc
@@ -17,6 +17,7 @@ limitations under the License.
#include "tensorflow/core/framework/op_kernel.h"
namespace tensorflow {
+namespace data {
namespace {
class BigtableRangeKeyDatasetOp : public DatasetOpKernel {
@@ -117,4 +118,5 @@ class BigtableRangeKeyDatasetOp : public DatasetOpKernel {
REGISTER_KERNEL_BUILDER(Name("BigtableRangeKeyDataset").Device(DEVICE_CPU),
BigtableRangeKeyDatasetOp);
} // namespace
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/contrib/bigtable/kernels/bigtable_sample_key_pairs_dataset_op.cc b/tensorflow/contrib/bigtable/kernels/bigtable_sample_key_pairs_dataset_op.cc
index 6928d9423c..736775bdac 100644
--- a/tensorflow/contrib/bigtable/kernels/bigtable_sample_key_pairs_dataset_op.cc
+++ b/tensorflow/contrib/bigtable/kernels/bigtable_sample_key_pairs_dataset_op.cc
@@ -18,6 +18,7 @@ limitations under the License.
#include "tensorflow/core/framework/op_kernel.h"
namespace tensorflow {
+namespace data {
namespace {
class BigtableSampleKeyPairsDatasetOp : public DatasetOpKernel {
@@ -205,4 +206,5 @@ REGISTER_KERNEL_BUILDER(
BigtableSampleKeyPairsDatasetOp);
} // namespace
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/contrib/bigtable/kernels/bigtable_sample_keys_dataset_op.cc b/tensorflow/contrib/bigtable/kernels/bigtable_sample_keys_dataset_op.cc
index a759fb5063..208b7b3e08 100644
--- a/tensorflow/contrib/bigtable/kernels/bigtable_sample_keys_dataset_op.cc
+++ b/tensorflow/contrib/bigtable/kernels/bigtable_sample_keys_dataset_op.cc
@@ -17,6 +17,7 @@ limitations under the License.
#include "tensorflow/core/framework/op_kernel.h"
namespace tensorflow {
+namespace data {
namespace {
class BigtableSampleKeysDatasetOp : public DatasetOpKernel {
@@ -118,4 +119,5 @@ REGISTER_KERNEL_BUILDER(Name("BigtableSampleKeysDataset").Device(DEVICE_CPU),
BigtableSampleKeysDatasetOp);
} // namespace
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/contrib/bigtable/kernels/bigtable_scan_dataset_op.cc b/tensorflow/contrib/bigtable/kernels/bigtable_scan_dataset_op.cc
index 78a920b077..9407855fe8 100644
--- a/tensorflow/contrib/bigtable/kernels/bigtable_scan_dataset_op.cc
+++ b/tensorflow/contrib/bigtable/kernels/bigtable_scan_dataset_op.cc
@@ -17,6 +17,7 @@ limitations under the License.
#include "tensorflow/core/framework/op_kernel.h"
namespace tensorflow {
+namespace data {
namespace {
class BigtableScanDatasetOp : public DatasetOpKernel {
@@ -224,4 +225,5 @@ REGISTER_KERNEL_BUILDER(Name("BigtableScanDataset").Device(DEVICE_CPU),
BigtableScanDatasetOp);
} // namespace
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/contrib/data/kernels/assert_next_dataset_op.cc b/tensorflow/contrib/data/kernels/assert_next_dataset_op.cc
index e36c9c0634..c19a609780 100644
--- a/tensorflow/contrib/data/kernels/assert_next_dataset_op.cc
+++ b/tensorflow/contrib/data/kernels/assert_next_dataset_op.cc
@@ -19,6 +19,7 @@ limitations under the License.
#include "tensorflow/core/framework/tensor.h"
namespace tensorflow {
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level
@@ -150,4 +151,5 @@ REGISTER_KERNEL_BUILDER(Name("AssertNextDataset").Device(DEVICE_CPU),
AssertNextDatasetOp);
} // namespace
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/contrib/data/kernels/csv_dataset_op.cc b/tensorflow/contrib/data/kernels/csv_dataset_op.cc
index 0ba905b92e..74107d5242 100644
--- a/tensorflow/contrib/data/kernels/csv_dataset_op.cc
+++ b/tensorflow/contrib/data/kernels/csv_dataset_op.cc
@@ -24,6 +24,7 @@ limitations under the License.
#include "tensorflow/core/lib/io/zlib_inputstream.h"
namespace tensorflow {
+namespace data {
namespace {
class CSVDatasetOp : public DatasetOpKernel {
@@ -851,4 +852,5 @@ class CSVDatasetOp : public DatasetOpKernel {
REGISTER_KERNEL_BUILDER(Name("CSVDataset").Device(DEVICE_CPU), CSVDatasetOp);
} // namespace
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/contrib/data/kernels/directed_interleave_dataset_op.cc b/tensorflow/contrib/data/kernels/directed_interleave_dataset_op.cc
index ccf7ec1f84..a5321620bf 100644
--- a/tensorflow/contrib/data/kernels/directed_interleave_dataset_op.cc
+++ b/tensorflow/contrib/data/kernels/directed_interleave_dataset_op.cc
@@ -18,7 +18,7 @@ limitations under the License.
#include "tensorflow/core/lib/hash/hash.h"
namespace tensorflow {
-
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level
@@ -276,5 +276,5 @@ REGISTER_KERNEL_BUILDER(Name("DirectedInterleaveDataset").Device(DEVICE_CPU),
DirectedInterleaveDatasetOp);
} // namespace
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/contrib/data/kernels/identity_indexed_dataset.cc b/tensorflow/contrib/data/kernels/identity_indexed_dataset.cc
index 4718c1c8b9..c3cb45dbf7 100644
--- a/tensorflow/contrib/data/kernels/identity_indexed_dataset.cc
+++ b/tensorflow/contrib/data/kernels/identity_indexed_dataset.cc
@@ -17,6 +17,7 @@ limitations under the License.
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
+namespace data {
namespace {
class IdentityIndexedDatasetOp : public IndexedDatasetOpKernel {
@@ -150,4 +151,5 @@ REGISTER_KERNEL_BUILDER(Name("IdentityIndexedDataset").Device(DEVICE_CPU),
IdentityIndexedDatasetOp);
} // namespace
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/contrib/data/kernels/ignore_errors_dataset_op.cc b/tensorflow/contrib/data/kernels/ignore_errors_dataset_op.cc
index db24e60846..beec344534 100644
--- a/tensorflow/contrib/data/kernels/ignore_errors_dataset_op.cc
+++ b/tensorflow/contrib/data/kernels/ignore_errors_dataset_op.cc
@@ -18,7 +18,7 @@ limitations under the License.
#include "tensorflow/core/lib/random/random.h"
namespace tensorflow {
-
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level
@@ -137,5 +137,5 @@ REGISTER_KERNEL_BUILDER(Name("IgnoreErrorsDataset").Device(DEVICE_CPU),
IgnoreErrorsDatasetOp);
} // namespace
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/contrib/data/kernels/indexed_dataset.cc b/tensorflow/contrib/data/kernels/indexed_dataset.cc
index c69564a31b..ced8ab0d60 100644
--- a/tensorflow/contrib/data/kernels/indexed_dataset.cc
+++ b/tensorflow/contrib/data/kernels/indexed_dataset.cc
@@ -20,7 +20,7 @@ limitations under the License.
#include "tensorflow/core/lib/gtl/cleanup.h"
namespace tensorflow {
-
+namespace data {
namespace {
Status VerifyTypesMatch(const DataTypeVector& expected,
@@ -367,6 +367,7 @@ REGISTER_KERNEL_BUILDER(Name("IndexedDatasetMaterialize").Device(DEVICE_CPU),
MaterializeDatasetOp);
REGISTER_KERNEL_BUILDER(Name("IndexedDatasetGet").Device(DEVICE_CPU),
IndexedDatasetGet);
-} // namespace
+} // namespace
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/contrib/data/kernels/indexed_dataset.h b/tensorflow/contrib/data/kernels/indexed_dataset.h
index 6149de888c..7aa2d3fdbc 100644
--- a/tensorflow/contrib/data/kernels/indexed_dataset.h
+++ b/tensorflow/contrib/data/kernels/indexed_dataset.h
@@ -19,6 +19,7 @@ limitations under the License.
#include "tensorflow/core/framework/op_kernel.h"
namespace tensorflow {
+namespace data {
// TODO(saeta): Urgh, this is ugly.
class MaterializedIndexedDataset {
@@ -112,6 +113,7 @@ Status GetIndexedDatasetFromVariantTensor(const Tensor& tensor,
Status StoreIndexedDatasetInVariantTensor(IndexedDataset* dataset,
Tensor* tensor);
+} // namespace data
} // namespace tensorflow
#endif // TENSORFLOW_CONTRIB_DATA_KERNELS_INDEXED_DATASET_H_
diff --git a/tensorflow/contrib/data/kernels/lmdb_dataset_op.cc b/tensorflow/contrib/data/kernels/lmdb_dataset_op.cc
index 80f39992fb..d233c1f8ec 100644
--- a/tensorflow/contrib/data/kernels/lmdb_dataset_op.cc
+++ b/tensorflow/contrib/data/kernels/lmdb_dataset_op.cc
@@ -22,6 +22,7 @@ limitations under the License.
#include "lmdb.h" // NOLINT(build/include)
namespace tensorflow {
+namespace data {
namespace {
class LMDBDatasetOp : public DatasetOpKernel {
@@ -212,4 +213,5 @@ class LMDBDatasetOp : public DatasetOpKernel {
REGISTER_KERNEL_BUILDER(Name("LMDBDataset").Device(DEVICE_CPU), LMDBDatasetOp);
} // namespace
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/contrib/data/kernels/prefetching_kernels.cc b/tensorflow/contrib/data/kernels/prefetching_kernels.cc
index 725f8933c9..078de717e0 100644
--- a/tensorflow/contrib/data/kernels/prefetching_kernels.cc
+++ b/tensorflow/contrib/data/kernels/prefetching_kernels.cc
@@ -24,6 +24,7 @@ limitations under the License.
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
+namespace data {
namespace {
struct BufferElement {
@@ -1114,5 +1115,6 @@ REGISTER_KERNEL_BUILDER(
Name("MultiDeviceIteratorFromStringHandle").Device(DEVICE_CPU),
MultiDeviceIteratorFromStringHandleOp);
-} // anonymous namespace
+} // namespace
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/contrib/data/kernels/threadpool_dataset_op.cc b/tensorflow/contrib/data/kernels/threadpool_dataset_op.cc
index ab584504a0..30fa97a636 100644
--- a/tensorflow/contrib/data/kernels/threadpool_dataset_op.cc
+++ b/tensorflow/contrib/data/kernels/threadpool_dataset_op.cc
@@ -20,6 +20,7 @@ limitations under the License.
#include "tensorflow/core/util/work_sharder.h"
namespace tensorflow {
+namespace data {
namespace {
class ThreadPoolResource : public ResourceBase {
@@ -214,4 +215,5 @@ REGISTER_KERNEL_BUILDER(Name("ThreadPoolDataset").Device(DEVICE_CPU),
ThreadPoolDatasetOp);
} // namespace
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/contrib/data/kernels/unique_dataset_op.cc b/tensorflow/contrib/data/kernels/unique_dataset_op.cc
index 6fbf5d2ebb..57fc5697a4 100644
--- a/tensorflow/contrib/data/kernels/unique_dataset_op.cc
+++ b/tensorflow/contrib/data/kernels/unique_dataset_op.cc
@@ -18,7 +18,7 @@ limitations under the License.
#include "tensorflow/core/lib/hash/hash.h"
namespace tensorflow {
-
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level
@@ -219,5 +219,5 @@ REGISTER_KERNEL_BUILDER(Name("UniqueDataset").Device(DEVICE_CPU),
UniqueDatasetOp);
} // namespace
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/contrib/data/python/kernel_tests/map_defun_op_test.py b/tensorflow/contrib/data/python/kernel_tests/map_defun_op_test.py
index 73cde40305..091eb5ce37 100644
--- a/tensorflow/contrib/data/python/kernel_tests/map_defun_op_test.py
+++ b/tensorflow/contrib/data/python/kernel_tests/map_defun_op_test.py
@@ -130,6 +130,22 @@ class MapDefunTest(test.TestCase):
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(result)
+ def testMapDefunCancelledCorrectly(self):
+
+ @function.Defun(dtypes.int64)
+ def defun(x):
+ # x has leading dimension 5, so gathering index 10 raises an error
+ return array_ops.gather(x, 10)
+
+ c = array_ops.tile(
+ array_ops.expand_dims(
+ constant_op.constant([1, 2, 3, 4, 5], dtype=dtypes.int64), 0),
+ [100, 1])
+ map_defun_op = map_defun.map_defun(defun, [c], [dtypes.int64], [()])[0]
+ with self.assertRaisesRegexp(errors.InvalidArgumentError,
+ r"indices = 10 is not in \[0, 5\)"):
+ self.evaluate(map_defun_op)
+
if __name__ == "__main__":
test.main()
diff --git a/tensorflow/contrib/data/python/ops/batching.py b/tensorflow/contrib/data/python/ops/batching.py
index 9c2001c34f..367c159dc5 100644
--- a/tensorflow/contrib/data/python/ops/batching.py
+++ b/tensorflow/contrib/data/python/ops/batching.py
@@ -272,9 +272,9 @@ def _padded_batch_dense_window(dataset, padded_shape, padding_value=None):
padding_value = 0
def batch_init_fn(_):
- return array_ops.fill(
- array_ops.concat([np.array([0], dtype=np.int32), padded_shape], 0),
- constant_op.constant(padding_value, dtype=dataset.output_types))
+ batch_shape = array_ops.concat(
+ [np.array([0], dtype=np.int32), padded_shape], 0)
+ return gen_array_ops.empty(batch_shape, dtype=dataset.output_types)
def batch_reduce_fn(state, value):
return array_ops.concat([state, [value]], 0)
diff --git a/tensorflow/contrib/distribute/python/examples/keras_mnist.py b/tensorflow/contrib/distribute/python/examples/keras_mnist.py
index a20069c4fe..0495134636 100644
--- a/tensorflow/contrib/distribute/python/examples/keras_mnist.py
+++ b/tensorflow/contrib/distribute/python/examples/keras_mnist.py
@@ -58,13 +58,13 @@ def get_input_datasets():
train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_ds = train_ds.repeat()
train_ds = train_ds.shuffle(100)
- train_ds = train_ds.batch(64)
+ train_ds = train_ds.batch(64, drop_remainder=True)
# eval dataset
eval_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test))
eval_ds = eval_ds.repeat()
eval_ds = eval_ds.shuffle(100)
- eval_ds = eval_ds.batch(64)
+ eval_ds = eval_ds.batch(64, drop_remainder=True)
return train_ds, eval_ds, input_shape
diff --git a/tensorflow/contrib/hadoop/kernels/hadoop_dataset_ops.cc b/tensorflow/contrib/hadoop/kernels/hadoop_dataset_ops.cc
index 80b2d3e08b..2bf6097d01 100644
--- a/tensorflow/contrib/hadoop/kernels/hadoop_dataset_ops.cc
+++ b/tensorflow/contrib/hadoop/kernels/hadoop_dataset_ops.cc
@@ -18,6 +18,7 @@ limitations under the License.
#include "tensorflow/core/platform/file_system.h"
namespace tensorflow {
+namespace data {
namespace {
static const size_t kSyncMarkerSize = 16;
@@ -332,9 +333,10 @@ class SequenceFileDatasetOp : public DatasetOpKernel {
};
DataTypeVector output_types_;
};
-} // namespace
REGISTER_KERNEL_BUILDER(Name("SequenceFileDataset").Device(DEVICE_CPU),
SequenceFileDatasetOp);
+} // namespace
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/contrib/lite/builtin_op_data.h b/tensorflow/contrib/lite/builtin_op_data.h
index e81f9e4f51..aecd71910c 100644
--- a/tensorflow/contrib/lite/builtin_op_data.h
+++ b/tensorflow/contrib/lite/builtin_op_data.h
@@ -25,6 +25,11 @@ extern "C" {
// TODO(aselle): Consider using "if this then that" for testing.
+// Useful placeholder to put in otherwise empty structs to avoid size warnings.
+typedef struct {
+ char dummy_;
+} EmptyStructPlaceholder;
+
// Possible padding types (for convolutions)
typedef enum {
kTfLitePaddingUnknown = 0,
@@ -129,9 +134,11 @@ typedef struct {
} TfLiteAddParams;
typedef struct {
+ EmptyStructPlaceholder placeholder_;
} TfLiteSpaceToBatchNDParams;
typedef struct {
+ EmptyStructPlaceholder placeholder_;
} TfLiteBatchToSpaceNDParams;
typedef struct {
@@ -178,9 +185,11 @@ typedef struct {
} TfLiteResizeBilinearParams;
typedef struct {
+ EmptyStructPlaceholder placeholder_;
} TfLitePadParams;
typedef struct {
+ EmptyStructPlaceholder placeholder_;
} TfLitePadV2Params;
typedef struct {
@@ -220,6 +229,7 @@ typedef struct {
} TfLiteGatherParams;
typedef struct {
+ EmptyStructPlaceholder placeholder_;
} TfLiteTransposeParams;
typedef struct {
diff --git a/tensorflow/contrib/lite/context.h b/tensorflow/contrib/lite/context.h
index c7f4df3cdc..b23183b743 100644
--- a/tensorflow/contrib/lite/context.h
+++ b/tensorflow/contrib/lite/context.h
@@ -39,6 +39,12 @@ extern "C" {
typedef enum { kTfLiteOk = 0, kTfLiteError = 1 } TfLiteStatus;
+// Forward declarations for use with dependent types.
+struct TfLiteContext;
+struct TfLiteNode;
+struct _TfLiteRegistration;
+struct _TfLiteDelegate;
+
// The list of external context types known to TF Lite. This list exists solely
// to avoid conflicts and to ensure ops can share the external contexts they
// need. Access to the external contexts is controled by one of the
@@ -60,10 +66,6 @@ typedef struct {
TfLiteStatus (*Refresh)(struct TfLiteContext* context);
} TfLiteExternalContext;
-// Forward declare so GetNode can use this is in Context.
-typedef struct _TfLiteRegistration TfLiteRegistration;
-typedef struct _TfLiteDelegate TfLiteDelegate;
-
#define kOptionalTensor (-1)
// Fixed size list of integers. Used for dimensions and inputs/outputs tensor
@@ -240,7 +242,7 @@ typedef struct {
// The delegate which knows how to handle `buffer_handle`.
// WARNING: This is an experimental interface that is subject to change.
- TfLiteDelegate* delegate;
+ struct _TfLiteDelegate* delegate;
// An integer buffer handle that can be handled by `delegate`.
// The value is valid only when delegate is not null.
@@ -278,7 +280,7 @@ void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor);
// A structure representing an instance of a node.
// This structure only exhibits the inputs, outputs and user defined data, not
// other features like the type.
-typedef struct {
+typedef struct TfLiteNode {
// Inputs to this node expressed as indices into the simulator's tensors.
TfLiteIntArray* inputs;
@@ -305,7 +307,7 @@ typedef struct {
// The pointer to the delegate. This is non-null only when the node is
// created by calling `interpreter.ModifyGraphWithDelegate`.
// WARNING: This is an experimental interface that is subject to change.
- TfLiteDelegate* delegate;
+ struct _TfLiteDelegate* delegate;
} TfLiteNode;
typedef struct TfLiteContext {
@@ -351,15 +353,15 @@ typedef struct TfLiteContext {
// Get a Tensor node by node_index.
// WARNING: This is an experimental interface that is subject to change.
- TfLiteStatus (*GetNodeAndRegistration)(struct TfLiteContext*, int node_index,
- TfLiteNode** node,
- TfLiteRegistration** registration);
+ TfLiteStatus (*GetNodeAndRegistration)(
+ struct TfLiteContext*, int node_index, struct TfLiteNode** node,
+ struct _TfLiteRegistration** registration);
// Replace ops with one or more stub delegate operations. This function
// does not take ownership of `nodes_to_replace`.
TfLiteStatus (*ReplaceSubgraphsWithDelegateKernels)(
- struct TfLiteContext*, TfLiteRegistration registration,
- const TfLiteIntArray* nodes_to_replace, TfLiteDelegate* delegate);
+ struct TfLiteContext*, struct _TfLiteRegistration registration,
+ const TfLiteIntArray* nodes_to_replace, struct _TfLiteDelegate* delegate);
// Number of threads that are recommended to subsystems like gemmlowp and
// eigen.
@@ -447,19 +449,20 @@ typedef struct _TfLiteDelegate {
// will look at the nodes and call ReplaceSubgraphsWithDelegateKernels()
// to ask the TensorFlow lite runtime to create macro-nodes to represent
// delegated subgraphs of the original graph.
- TfLiteStatus (*Prepare)(TfLiteContext* context, TfLiteDelegate* delegate);
+ TfLiteStatus (*Prepare)(struct TfLiteContext* context,
+ struct _TfLiteDelegate* delegate);
// Copy the data from delegate buffer handle to raw memory.
// This can be null if the delegate doesn't use its own buffer.
- TfLiteStatus (*CopyFromBufferHandle)(TfLiteContext* context,
- TfLiteDelegate* delegate,
+ TfLiteStatus (*CopyFromBufferHandle)(struct TfLiteContext* context,
+ struct _TfLiteDelegate* delegate,
TfLiteBufferHandle buffer_handle,
void* data, size_t size);
// Copy the data from raw memory to delegate buffer handle.
// This can be null if the delegate doesn't use its own buffer.
- TfLiteStatus (*CopyToBufferHandle)(TfLiteContext* context,
- TfLiteDelegate* delegate,
+ TfLiteStatus (*CopyToBufferHandle)(struct TfLiteContext* context,
+ struct _TfLiteDelegate* delegate,
TfLiteBufferHandle buffer_handle,
void* data, size_t size);
@@ -467,7 +470,8 @@ typedef struct _TfLiteDelegate {
// this doesn't release the underlying resource (e.g. textures). The
// resources are either owned by application layer or the delegate.
// This can be null if the delegate doesn't use its own buffer.
- void (*FreeBufferHandle)(TfLiteContext* context, TfLiteDelegate* delegate,
+ void (*FreeBufferHandle)(struct TfLiteContext* context,
+ struct _TfLiteDelegate* delegate,
TfLiteBufferHandle* handle);
} TfLiteDelegate;
diff --git a/tensorflow/contrib/lite/delegates/eager/delegate_test.cc b/tensorflow/contrib/lite/delegates/eager/delegate_test.cc
index eb47f46c0b..984f8bbc98 100644
--- a/tensorflow/contrib/lite/delegates/eager/delegate_test.cc
+++ b/tensorflow/contrib/lite/delegates/eager/delegate_test.cc
@@ -72,6 +72,26 @@ TEST_F(DelegateTest, FullGraph) {
ASSERT_THAT(GetShape(8), ElementsAre(2, 1));
ASSERT_THAT(GetValues(8), ElementsAre(14.52f, 38.72f));
+ ASSERT_EQ(GetType(8), kTfLiteFloat32);
+}
+
+TEST_F(DelegateTest, NonFloatTypeInference) {
+ AddTensors(3, {0, 1}, {2}, kTfLiteInt32, {2});
+
+ AddTfOp(testing::kAdd, {0, 1}, {2});
+
+ ConfigureDelegate();
+
+ SetShape(0, {2, 2});
+ SetTypedValues<int>(0, {1, 2, 3, 4});
+ SetShape(1, {2, 2});
+ SetTypedValues<int>(1, {4, 3, 2, 1});
+
+ ASSERT_TRUE(Invoke());
+
+ ASSERT_THAT(GetShape(2), ElementsAre(2, 2));
+ ASSERT_THAT(GetTypedValues<int>(2), ElementsAre(5, 5, 5, 5));
+ ASSERT_EQ(GetType(2), kTfLiteInt32);
}
TEST_F(DelegateTest, MixedGraph) {
diff --git a/tensorflow/contrib/lite/delegates/eager/kernel.cc b/tensorflow/contrib/lite/delegates/eager/kernel.cc
index f8467c7cb2..0ee4db1ffb 100644
--- a/tensorflow/contrib/lite/delegates/eager/kernel.cc
+++ b/tensorflow/contrib/lite/delegates/eager/kernel.cc
@@ -278,7 +278,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* tensor = &context->tensors[tensor_index];
TF_LITE_ENSURE_OK(
context,
- CopyShape(context, buffer_map->GetTensor(tensor_index), tensor));
+ CopyShapeAndType(context, buffer_map->GetTensor(tensor_index), tensor));
tensor->buffer_handle = tensor_index;
tensor->data_is_stale = true;
}
diff --git a/tensorflow/contrib/lite/delegates/eager/test_util.cc b/tensorflow/contrib/lite/delegates/eager/test_util.cc
index b8c9e2652a..8584999ace 100644
--- a/tensorflow/contrib/lite/delegates/eager/test_util.cc
+++ b/tensorflow/contrib/lite/delegates/eager/test_util.cc
@@ -25,19 +25,6 @@ namespace testing {
bool EagerModelTest::Invoke() { return interpreter_->Invoke() == kTfLiteOk; }
-void EagerModelTest::SetValues(int tensor_index,
- const std::vector<float>& values) {
- float* v = interpreter_->typed_tensor<float>(tensor_index);
- for (float f : values) {
- *v++ = f;
- }
-}
-
-std::vector<float> EagerModelTest::GetValues(int tensor_index) {
- TfLiteTensor* o = interpreter_->tensor(tensor_index);
- return std::vector<float>(o->data.f, o->data.f + o->bytes / sizeof(float));
-}
-
void EagerModelTest::SetShape(int tensor_index,
const std::vector<int>& values) {
ASSERT_EQ(interpreter_->ResizeInputTensor(tensor_index, values), kTfLiteOk);
@@ -54,13 +41,21 @@ std::vector<int> EagerModelTest::GetShape(int tensor_index) {
return result;
}
+TfLiteType EagerModelTest::GetType(int tensor_index) {
+ return interpreter_->tensor(tensor_index)->type;
+}
+
void EagerModelTest::AddTensors(int num_tensors, const std::vector<int>& inputs,
const std::vector<int>& outputs,
- const TfLiteType& type,
- const std::vector<int>& dims) {
+ TfLiteType type, const std::vector<int>& dims) {
interpreter_->AddTensors(num_tensors);
for (int i = 0; i < num_tensors; ++i) {
TfLiteQuantizationParams quant;
+ // Suppress explicit output type specification to ensure type inference
+ // works properly.
+ if (std::find(outputs.begin(), outputs.end(), i) != outputs.end()) {
+ type = kTfLiteFloat32;
+ }
CHECK_EQ(interpreter_->SetTensorParametersReadWrite(i, type,
/*name=*/"",
/*dims=*/dims, quant),
@@ -101,18 +96,26 @@ void EagerModelTest::AddTfOp(TfOpType op, const std::vector<int>& inputs,
return " attr{ key: '" + key + "' value {" + value + "}}";
};
+ // Crude type attribution; this will need fleshing out as more tests are added.
+ // TODO(b/113613439): Use nodedef string utilities to properly handle
+ // all types.
+ string type_attribute = attr("T", "type: DT_FLOAT");
+ if (interpreter_->tensor(inputs[0])->type == kTfLiteInt32) {
+ type_attribute = attr("T", "type: DT_INT32");
+ }
+
if (op == kUnpack) {
- string attributes = attr("T", "type: DT_FLOAT") + attr("num", "i: 2") +
- attr("axis", "i: 0");
+ string attributes =
+ type_attribute + attr("num", "i: 2") + attr("axis", "i: 0");
AddTfOp("EagerUnpack", "Unpack", attributes, inputs, outputs);
} else if (op == kIdentity) {
- string attributes = attr("T", "type: DT_FLOAT");
+ string attributes = type_attribute;
AddTfOp("EagerIdentity", "Identity", attributes, inputs, outputs);
} else if (op == kAdd) {
- string attributes = attr("T", "type: DT_FLOAT");
+ string attributes = type_attribute;
AddTfOp("EagerAdd", "Add", attributes, inputs, outputs);
} else if (op == kMul) {
- string attributes = attr("T", "type: DT_FLOAT");
+ string attributes = type_attribute;
AddTfOp("EagerMul", "Mul", attributes, inputs, outputs);
} else if (op == kNonExistent) {
AddTfOp("NonExistentOp", "NonExistentOp", "", inputs, outputs);
diff --git a/tensorflow/contrib/lite/delegates/eager/test_util.h b/tensorflow/contrib/lite/delegates/eager/test_util.h
index 0eab9e1135..816db41931 100644
--- a/tensorflow/contrib/lite/delegates/eager/test_util.h
+++ b/tensorflow/contrib/lite/delegates/eager/test_util.h
@@ -44,11 +44,30 @@ class EagerModelTest : public ::testing::Test {
bool Invoke();
+ // Sets the (typed) tensor's values at the given index.
+ template <typename T>
+ void SetTypedValues(int tensor_index, const std::vector<T>& values) {
+ memcpy(interpreter_->typed_tensor<T>(tensor_index), values.data(),
+ values.size() * sizeof(T));
+ }
+
+ // Returns the (typed) tensor's values at the given index.
+ template <typename T>
+ std::vector<T> GetTypedValues(int tensor_index) {
+ const TfLiteTensor* t = interpreter_->tensor(tensor_index);
+ const T* tdata = interpreter_->typed_tensor<T>(tensor_index);
+ return std::vector<T>(tdata, tdata + t->bytes / sizeof(T));
+ }
+
// Sets the tensor's values at the given index.
- void SetValues(int tensor_index, const std::vector<float>& values);
+ void SetValues(int tensor_index, const std::vector<float>& values) {
+ SetTypedValues<float>(tensor_index, values);
+ }
// Returns the tensor's values at the given index.
- std::vector<float> GetValues(int tensor_index);
+ std::vector<float> GetValues(int tensor_index) {
+ return GetTypedValues<float>(tensor_index);
+ }
// Sets the tensor's shape at the given index.
void SetShape(int tensor_index, const std::vector<int>& values);
@@ -56,13 +75,16 @@ class EagerModelTest : public ::testing::Test {
// Returns the tensor's shape at the given index.
std::vector<int> GetShape(int tensor_index);
+ // Returns the tensor's type at the given index.
+ TfLiteType GetType(int tensor_index);
+
const TestErrorReporter& error_reporter() const { return error_reporter_; }
// Adds `num_tensor` tensors to the model. `inputs` contains the indices of
// the input tensors and `outputs` contains the indices of the output
// tensors. All tensors are set to have `type` and `dims`.
void AddTensors(int num_tensors, const std::vector<int>& inputs,
- const std::vector<int>& outputs, const TfLiteType& type,
+ const std::vector<int>& outputs, TfLiteType type,
const std::vector<int>& dims);
// Adds a TFLite Mul op. `inputs` contains the indices of the input tensors
diff --git a/tensorflow/contrib/lite/delegates/eager/util.cc b/tensorflow/contrib/lite/delegates/eager/util.cc
index 4426c653e6..051246bf86 100644
--- a/tensorflow/contrib/lite/delegates/eager/util.cc
+++ b/tensorflow/contrib/lite/delegates/eager/util.cc
@@ -26,8 +26,17 @@ TfLiteStatus ConvertStatus(TfLiteContext* context,
return kTfLiteOk;
}
-TfLiteStatus CopyShape(TfLiteContext* context, const tensorflow::Tensor& src,
- TfLiteTensor* tensor) {
+TfLiteStatus CopyShapeAndType(TfLiteContext* context,
+ const tensorflow::Tensor& src,
+ TfLiteTensor* tensor) {
+ tensor->type = GetTensorFlowLiteType(static_cast<TF_DataType>(src.dtype()));
+ if (tensor->type == kTfLiteNoType) {
+ context->ReportError(context,
+ "TF Lite does not support TensorFlow data type: %s",
+ DataTypeString(src.dtype()).c_str());
+ return kTfLiteError;
+ }
+
int num_dims = src.dims();
TfLiteIntArray* shape = TfLiteIntArrayCreate(num_dims);
for (int j = 0; j < num_dims; ++j) {
@@ -68,5 +77,28 @@ TF_DataType GetTensorFlowDataType(TfLiteType type) {
}
}
+TfLiteType GetTensorFlowLiteType(TF_DataType type) {
+ switch (type) {
+ case TF_FLOAT:
+ return kTfLiteFloat32;
+ case TF_INT16:
+ return kTfLiteInt16;
+ case TF_INT32:
+ return kTfLiteInt32;
+ case TF_UINT8:
+ return kTfLiteUInt8;
+ case TF_INT64:
+ return kTfLiteInt64;
+ case TF_COMPLEX64:
+ return kTfLiteComplex64;
+ case TF_STRING:
+ return kTfLiteString;
+ case TF_BOOL:
+ return kTfLiteBool;
+ default:
+ return kTfLiteNoType;
+ }
+}
+
} // namespace eager
} // namespace tflite
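GetTensorFlowLiteType() is the inverse of the existing GetTensorFlowDataType() and, together with CopyShapeAndType(), lets the eager delegate propagate non-float output types (see the NonFloatTypeInference test earlier in this diff). A small round-trip sketch, assuming only the mappings shown in util_test.cc below; TF types without a TF Lite counterpart fall back to kTfLiteNoType:

  EXPECT_EQ(kTfLiteInt32,
            GetTensorFlowLiteType(GetTensorFlowDataType(kTfLiteInt32)));
  EXPECT_EQ(kTfLiteNoType, GetTensorFlowLiteType(TF_RESOURCE));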
diff --git a/tensorflow/contrib/lite/delegates/eager/util.h b/tensorflow/contrib/lite/delegates/eager/util.h
index a9407be071..ff500d18f3 100644
--- a/tensorflow/contrib/lite/delegates/eager/util.h
+++ b/tensorflow/contrib/lite/delegates/eager/util.h
@@ -28,14 +28,19 @@ namespace eager {
TfLiteStatus ConvertStatus(TfLiteContext* context,
const tensorflow::Status& status);
-// Copies the given shape of the given 'src' into a TF Lite 'tensor'. Logs an
-// error and returns kTfLiteError if the shape can't be converted.
-TfLiteStatus CopyShape(TfLiteContext* context, const tensorflow::Tensor& src,
- TfLiteTensor* tensor);
+// Copies the given shape and type of the TensorFlow 'src' tensor into a TF Lite
+// 'tensor'. Logs an error and returns kTfLiteError if the shape or type can't
+// be converted.
+TfLiteStatus CopyShapeAndType(TfLiteContext* context,
+ const tensorflow::Tensor& src,
+ TfLiteTensor* tensor);
// Returns the TF C API Data type that corresponds to the given TfLiteType.
TF_DataType GetTensorFlowDataType(TfLiteType type);
+// Returns the TfLiteType that corresponds to the given TF C API Data type.
+TfLiteType GetTensorFlowLiteType(TF_DataType);
+
} // namespace eager
} // namespace tflite
diff --git a/tensorflow/contrib/lite/delegates/eager/util_test.cc b/tensorflow/contrib/lite/delegates/eager/util_test.cc
index 53378a1eaf..aebc91149c 100644
--- a/tensorflow/contrib/lite/delegates/eager/util_test.cc
+++ b/tensorflow/contrib/lite/delegates/eager/util_test.cc
@@ -26,6 +26,7 @@ namespace eager {
namespace {
using tensorflow::DT_FLOAT;
+using tensorflow::DT_INT32;
using tensorflow::Tensor;
using ::testing::ElementsAre;
@@ -71,27 +72,41 @@ TEST(UtilTest, ConvertStatus) {
EXPECT_TRUE(context.error.empty());
}
-TEST(UtilTest, CopyShape) {
+TEST(UtilTest, CopyShapeAndType) {
TestContext context;
context.ReportError = ReportError;
context.ResizeTensor = ResizeTensor;
TfLiteTensor dst;
- EXPECT_EQ(CopyShape(&context, Tensor(), &dst), kTfLiteOk);
+ EXPECT_EQ(CopyShapeAndType(&context, Tensor(), &dst), kTfLiteOk);
EXPECT_THAT(context.new_size, ElementsAre(0));
+ EXPECT_EQ(dst.type, kTfLiteFloat32);
- EXPECT_EQ(CopyShape(&context, Tensor(DT_FLOAT, {1, 2}), &dst), kTfLiteOk);
+ EXPECT_EQ(CopyShapeAndType(&context, Tensor(DT_FLOAT, {1, 2}), &dst),
+ kTfLiteOk);
EXPECT_THAT(context.new_size, ElementsAre(1, 2));
+ EXPECT_EQ(dst.type, kTfLiteFloat32);
- EXPECT_EQ(CopyShape(&context, Tensor(DT_FLOAT, {1LL << 44, 2}), &dst),
+ EXPECT_EQ(CopyShapeAndType(&context, Tensor(DT_INT32, {1, 2}), &dst),
+ kTfLiteOk);
+ EXPECT_THAT(context.new_size, ElementsAre(1, 2));
+ EXPECT_EQ(dst.type, kTfLiteInt32);
+
+ EXPECT_EQ(CopyShapeAndType(&context, Tensor(DT_FLOAT, {1LL << 44, 2}), &dst),
kTfLiteError);
EXPECT_EQ(context.error,
"Dimension value in TensorFlow shape is larger than supported by "
"TF Lite");
+
+ EXPECT_EQ(
+ CopyShapeAndType(&context, Tensor(tensorflow::DT_HALF, {1, 2}), &dst),
+ kTfLiteError);
+ EXPECT_EQ(context.error,
+ "TF Lite does not support TensorFlow data type: half");
}
-TEST(UtilTest, TypeConversions) {
+TEST(UtilTest, TypeConversionsFromTFLite) {
EXPECT_EQ(TF_FLOAT, GetTensorFlowDataType(kTfLiteNoType));
EXPECT_EQ(TF_FLOAT, GetTensorFlowDataType(kTfLiteFloat32));
EXPECT_EQ(TF_INT16, GetTensorFlowDataType(kTfLiteInt16));
@@ -103,6 +118,19 @@ TEST(UtilTest, TypeConversions) {
EXPECT_EQ(TF_BOOL, GetTensorFlowDataType(kTfLiteBool));
}
+TEST(UtilTest, TypeConversionsFromTensorFlow) {
+ EXPECT_EQ(kTfLiteFloat32, GetTensorFlowLiteType(TF_FLOAT));
+ EXPECT_EQ(kTfLiteInt16, GetTensorFlowLiteType(TF_INT16));
+ EXPECT_EQ(kTfLiteInt32, GetTensorFlowLiteType(TF_INT32));
+ EXPECT_EQ(kTfLiteUInt8, GetTensorFlowLiteType(TF_UINT8));
+ EXPECT_EQ(kTfLiteInt64, GetTensorFlowLiteType(TF_INT64));
+ EXPECT_EQ(kTfLiteComplex64, GetTensorFlowLiteType(TF_COMPLEX64));
+ EXPECT_EQ(kTfLiteString, GetTensorFlowLiteType(TF_STRING));
+ EXPECT_EQ(kTfLiteBool, GetTensorFlowLiteType(TF_BOOL));
+ EXPECT_EQ(kTfLiteNoType, GetTensorFlowLiteType(TF_RESOURCE));
+ EXPECT_EQ(kTfLiteNoType, GetTensorFlowLiteType(TF_VARIANT));
+}
+
} // namespace
} // namespace eager
} // namespace tflite
diff --git a/tensorflow/contrib/lite/g3doc/models.md b/tensorflow/contrib/lite/g3doc/models.md
index 0f9d016e6d..88f6cda420 100644
--- a/tensorflow/contrib/lite/g3doc/models.md
+++ b/tensorflow/contrib/lite/g3doc/models.md
@@ -3,33 +3,34 @@
## Image classification (Float Models)
-Model Name | Paper_Model_Files^ | Model_Size | Top-1 Accuracy | Top-5 Accuracy | TF Lite Performance^^ | Tensorflow Performance
-------------------- | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | ---------: | -------------: | -------------: | --------------------: | ---------------------:
-DenseNet | [paper](https://arxiv.org/abs/1608.06993), [tflite&pb](https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/densenet_2018_04_27.tgz) | 43.6 Mb | 64.2% | 85.6% | 894 ms | 1262 ms
-SqueezeNet | [paper](https://arxiv.org/abs/1602.07360), [tflite&pb](https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/squeezenet_2018_04_27.tgz) | 5.0 Mb | 49.0% | 72.9% | 224 ms | 255 ms
-NASNet mobile | [paper](https://arxiv.org/abs/1707.07012), [tflite&pb](https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/nasnet_mobile_2018_04_27.tgz) | 21.4 Mb | 74.2% | 91.7% | 261 ms | 389 ms
-NASNet large | [paper](https://arxiv.org/abs/1707.07012), [tflite&pb](https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/nasnet_large_2018_04_27.tgz) | 355.3 Mb | 82.8% | 96.2% | 6697 ms | 7940 ms
-ResNet_V2_50 | [paper](https://arxiv.org/abs/1603.05027), [tflite&pb](https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/resnet_v2_50_2018_04_27.tgz) | 102.3 Mb | 68.1% | 88.4% | 942 ms | 1008 ms
-ResNet_V2_101 | [paper](https://arxiv.org/abs/1603.05027), [tflite&pb](https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/resnet_v2_101_2018_04_27.tgz) | 178.3 Mb | 70.4% | 89.6% | 1880 ms | 1970 ms
-Inception_V3 | [paper](http://arxiv.org/abs/1512.00567), [tflite&pb](https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v3_2018_04_27.tgz) | 95.3 Mb | 78.2% | 94.0% | 1433 ms | 1522 ms
-Inception_V4 | [paper](http://arxiv.org/abs/1602.07261), [tflite&pb](https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v4_2018_04_27.tgz) | 170.7 Mb | 80.4% | 95.2% | 2986 ms | 3139 ms
-Inception_ResNet_V2 | [paper](https://arxiv.org/abs/1602.07261), [tflite&pb](https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_resnet_v2_2018_04_27.tgz) | 121.0 Mb | 77.8% | 94.1% | 2731 ms | 2926 ms
-Mobilenet_V1_0.25_128 | [paper](https://arxiv.org/pdf/1704.04861.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_128.tgz) | 1.9 Mb | 41.6% | 66.6% | 6.2 ms | 13.0 ms
-Mobilenet_V1_0.25_160 | [paper](https://arxiv.org/pdf/1704.04861.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_160.tgz) | 1.9 Mb | 45.7% | 70.6% | 8.6 ms | 19.5 ms
-Mobilenet_V1_0.25_192 | [paper](https://arxiv.org/pdf/1704.04861.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_192.tgz) | 1.9 Mb | 47.5% | 72.4% | 12.1 ms | 27.8 ms
-Mobilenet_V1_0.25_224 | [paper](https://arxiv.org/pdf/1704.04861.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_224.tgz) | 1.9 Mb | 50.0% | 74.4% | 16.2 ms | 37.3 ms
-Mobilenet_V1_0.50_128 | [paper](https://arxiv.org/pdf/1704.04861.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.5_128.tgz) | 5.3 Mb | 56.5% | 79.5% | 18.1 ms | 29.9 ms
-Mobilenet_V1_0.50_160 | [paper](https://arxiv.org/pdf/1704.04861.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.5_160.tgz) | 5.3 Mb | 59.3% | 82.1% | 26.8 ms | 45.9 ms
-Mobilenet_V1_0.50_192 | [paper](https://arxiv.org/pdf/1704.04861.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.5_192.tgz) | 5.3 Mb | 62.0% | 83.7% | 35.6 ms | 65.3 ms
-Mobilenet_V1_0.50_224 | [paper](https://arxiv.org/pdf/1704.04861.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.5_224.tgz) | 5.3 Mb | 63.5% | 85.0% | 47.6 ms | 164.2 ms
-Mobilenet_V1_0.75_128 | [paper](https://arxiv.org/pdf/1704.04861.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.75_128.tgz) | 10.3 Mb | 62.3% | 84.1% | 34.6 ms | 48.7 ms
-Mobilenet_V1_0.75_160 | [paper](https://arxiv.org/pdf/1704.04861.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.75_160.tgz) | 10.3 Mb | 65.5% | 86.1% | 51.3 ms | 75.2 ms
-Mobilenet_V1_0.75_192 | [paper](https://arxiv.org/pdf/1704.04861.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.75_192.tgz) | 10.3 Mb | 67.4% | 87.4% | 71.7 ms | 107.0 ms
-Mobilenet_V1_0.75_224 | [paper](https://arxiv.org/pdf/1704.04861.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.75_224.tgz) | 10.3 Mb | 68.6% | 88.3% | 95.7 ms | 143.4 ms
-Mobilenet_V1_1.0_128 | [paper](https://arxiv.org/pdf/1704.04861.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_128.tgz) | 16.9 Mb | 65.5% | 85.9% | 57.4 ms | 76.8 ms
-Mobilenet_V1_1.0_160 | [paper](https://arxiv.org/pdf/1704.04861.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_160.tgz) | 16.9 Mb | 68.3% | 87.8% | 86.0 ms | 117.7 ms
-Mobilenet_V1_1.0_192 | [paper](https://arxiv.org/pdf/1704.04861.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_192.tgz) | 16.9 Mb | 70.2% | 89.3% | 118.6 ms | 167.3 ms
-Mobilenet_V1_1.0_224 | [paper](https://arxiv.org/pdf/1704.04861.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_224.tgz) | 16.9 Mb | 71.3% | 90.1% | 160.1 ms | 224.3 ms
+Model Name | Paper_Model_Files^ | Model_Size | Top-1 Accuracy | Top-5 Accuracy | TF Lite Performance^^ | Tensorflow Performance
+--------------------- | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | ---------: | -------------: | -------------: | --------------------: | ---------------------:
+DenseNet | [paper](https://arxiv.org/abs/1608.06993), [tflite&pb](https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/densenet_2018_04_27.tgz) | 43.6 Mb | 64.2% | 85.6% | 894 ms | 1262 ms
+SqueezeNet | [paper](https://arxiv.org/abs/1602.07360), [tflite&pb](https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/squeezenet_2018_04_27.tgz) | 5.0 Mb | 49.0% | 72.9% | 224 ms | 255 ms
+NASNet mobile | [paper](https://arxiv.org/abs/1707.07012), [tflite&pb](https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/nasnet_mobile_2018_04_27.tgz) | 21.4 Mb | 74.2% | 91.7% | 261 ms | 389 ms
+NASNet large | [paper](https://arxiv.org/abs/1707.07012), [tflite&pb](https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/nasnet_large_2018_04_27.tgz) | 355.3 Mb | 82.8% | 96.2% | 6697 ms | 7940 ms
+ResNet_V2_50 | [paper](https://arxiv.org/abs/1603.05027), [tflite&pb](https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/resnet_v2_50_2018_04_27.tgz) | 102.3 Mb | 68.1% | 88.4% | 942 ms | 1008 ms
+ResNet_V2_101 | [paper](https://arxiv.org/abs/1603.05027), [tflite&pb](https://storage.googleapis.com/download.tensorflow.org/models/tflite_11_05_08/resnet_v2_101.tgz) | 178.3 Mb | 70.4% | 89.6% | 1880 ms | 1970 ms
+Inception_V3 | [paper](http://arxiv.org/abs/1512.00567), [tflite&pb](https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v3_2018_04_27.tgz) | 95.3 Mb | 78.2% | 94.0% | 1433 ms | 1522 ms
+Inception_V4 | [paper](http://arxiv.org/abs/1602.07261), [tflite&pb](https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v4_2018_04_27.tgz) | 170.7 Mb | 80.4% | 95.2% | 2986 ms | 3139 ms
+Inception_ResNet_V2 | [paper](https://arxiv.org/abs/1602.07261), [tflite&pb](https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_resnet_v2_2018_04_27.tgz) | 121.0 Mb | 77.8% | 94.1% | 2731 ms | 2926 ms
+Mobilenet_V1_0.25_128 | [paper](https://arxiv.org/pdf/1704.04861.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_128.tgz) | 1.9 Mb | 41.6% | 66.6% | 6.2 ms | 13.0 ms
+Mobilenet_V1_0.25_160 | [paper](https://arxiv.org/pdf/1704.04861.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_160.tgz) | 1.9 Mb | 45.7% | 70.6% | 8.6 ms | 19.5 ms
+Mobilenet_V1_0.25_192 | [paper](https://arxiv.org/pdf/1704.04861.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_192.tgz) | 1.9 Mb | 47.5% | 72.4% | 12.1 ms | 27.8 ms
+Mobilenet_V1_0.25_224 | [paper](https://arxiv.org/pdf/1704.04861.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_224.tgz) | 1.9 Mb | 50.0% | 74.4% | 16.2 ms | 37.3 ms
+Mobilenet_V1_0.50_128 | [paper](https://arxiv.org/pdf/1704.04861.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.5_128.tgz) | 5.3 Mb | 56.5% | 79.5% | 18.1 ms | 29.9 ms
+Mobilenet_V1_0.50_160 | [paper](https://arxiv.org/pdf/1704.04861.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.5_160.tgz) | 5.3 Mb | 59.3% | 82.1% | 26.8 ms | 45.9 ms
+Mobilenet_V1_0.50_192 | [paper](https://arxiv.org/pdf/1704.04861.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.5_192.tgz) | 5.3 Mb | 62.0% | 83.7% | 35.6 ms | 65.3 ms
+Mobilenet_V1_0.50_224 | [paper](https://arxiv.org/pdf/1704.04861.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.5_224.tgz) | 5.3 Mb | 63.5% | 85.0% | 47.6 ms | 164.2 ms
+Mobilenet_V1_0.75_128 | [paper](https://arxiv.org/pdf/1704.04861.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.75_128.tgz) | 10.3 Mb | 62.3% | 84.1% | 34.6 ms | 48.7 ms
+Mobilenet_V1_0.75_160 | [paper](https://arxiv.org/pdf/1704.04861.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.75_160.tgz) | 10.3 Mb | 65.5% | 86.1% | 51.3 ms | 75.2 ms
+Mobilenet_V1_0.75_192 | [paper](https://arxiv.org/pdf/1704.04861.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.75_192.tgz) | 10.3 Mb | 67.4% | 87.4% | 71.7 ms | 107.0 ms
+Mobilenet_V1_0.75_224 | [paper](https://arxiv.org/pdf/1704.04861.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.75_224.tgz) | 10.3 Mb | 68.6% | 88.3% | 95.7 ms | 143.4 ms
+Mobilenet_V1_1.0_128 | [paper](https://arxiv.org/pdf/1704.04861.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_128.tgz) | 16.9 Mb | 65.5% | 85.9% | 57.4 ms | 76.8 ms
+Mobilenet_V1_1.0_160 | [paper](https://arxiv.org/pdf/1704.04861.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_160.tgz) | 16.9 Mb | 68.3% | 87.8% | 86.0 ms | 117.7 ms
+Mobilenet_V1_1.0_192 | [paper](https://arxiv.org/pdf/1704.04861.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_192.tgz) | 16.9 Mb | 70.2% | 89.3% | 118.6 ms | 167.3 ms
+Mobilenet_V1_1.0_224 | [paper](https://arxiv.org/pdf/1704.04861.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_224.tgz) | 16.9 Mb | 71.3% | 90.1% | 160.1 ms | 224.3 ms
+Mobilenet_V2_1.0_224 | [paper](https://arxiv.org/pdf/1801.04381.pdf), [tflite&pb](http://download.tensorflow.org/models/tflite_11_05_08/mobilenet_v2_1.0_224.tgz) | 14.0 Mb | 71.9% | 90.1% | 117 ms |
^ The model files include both TF Lite FlatBuffer and Tensorflow frozen Graph.
@@ -41,8 +42,8 @@ after excluding blacklisted images.
## Image classification (Quantized Models)
-Model Name | Paper_Model_Files | Model_Size | Top-1 Accuracy | Top-5 Accuracy | TF Lite Performance
------------------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------: | ---------: | -------------: | -------------: | ------------------:
+Model Name | Paper_Model_Files | Model_Size | Top-1 Accuracy | Top-5 Accuracy | TF Lite Performance
+--------------------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------: | ---------: | -------------: | -------------: | ------------------:
Mobilenet_V1_0.25_128_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_0.25_128_quant.tgz) | 0.5 Mb | 39.8% | 64.8% | 3.7 ms
Mobilenet_V1_0.25_160_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_0.25_160_quant.tgz) | 0.5 Mb | 43.0% | 68.4% | 5.5 ms
Mobilenet_V1_0.25_192_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_0.25_192_quant.tgz) | 0.5 Mb | 46.0% | 71.2% | 7.9 ms
@@ -59,9 +60,12 @@ Mobilenet_V1_1.0_128_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tf
Mobilenet_V1_1.0_160_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_160_quant.tgz) | 4.3 Mb | 67.2% | 86.9% | 37.4 ms
Mobilenet_V1_1.0_192_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_192_quant.tgz) | 4.3 Mb | 69.4% | 88.3% | 51.9 ms
Mobilenet_V1_1.0_224_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz) | 4.3 Mb | 70.2% | 89.1% | 70.2 ms
+Mobilenet_v2_1.0_224_quant | [paper](https://arxiv.org/abs/1806.08342), [tflite&pb](http://download.tensorflow.org/models/tflite_11_05_08/mobilenet_v2_1.0_224_quant.tgz) | 3.4 Mb | 71.1% | 90.1% | 80.3 ms
+Inception_v3_quant          | [paper](https://arxiv.org/abs/1806.08342), [tflite&pb](http://download.tensorflow.org/models/tflite_11_05_08/inception_v3_quant.tgz)                                  | 23 Mb      | 77.5%          | 93.6%          | 637 ms
## Other models
-Model | TF Lite FlatBuffer
------------------------ | :----------------:
-Smart Reply 1.0 Android | [reference](https://research.googleblog.com/2017/11/on-device-conversational-modeling-with.html), [tflite](https://storage.googleapis.com/download.tensorflow.org/models/smartreply_1.0_2017_11_01.zip)
+Model                   | TF Lite FlatBuffer
+----------------------- | :----------------:
+Smart Reply 1.0 Android | [reference](https://research.googleblog.com/2017/11/on-device-conversational-modeling-with.html), [tflite](https://storage.googleapis.com/download.tensorflow.org/models/smartreply_1.0_2017_11_01.zip)
diff --git a/tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm.cc b/tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm.cc
index cde4f55a16..6b8ecdd5c3 100644
--- a/tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm.cc
+++ b/tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm.cc
@@ -104,6 +104,19 @@ constexpr int kBwInputActivationStateTensor = 37;
// Cell state tensors of size {n_batch, n_cell}
constexpr int kBwInputCellStateTensor = 38;
+// Auxiliary input and weights when stacking.
+constexpr int kAuxInputTensor = 39; // Optional
+// Forward weights.
+constexpr int kFwAuxInputToInputWeightsTensor = 40; // Optional
+constexpr int kFwAuxInputToForgetWeightsTensor = 41; // Optional
+constexpr int kFwAuxInputToCellWeightsTensor = 42; // Optional
+constexpr int kFwAuxInputToOutputWeightsTensor = 43; // Optional
+// Backward weights.
+constexpr int kBwAuxInputToInputWeightsTensor = 44; // Optional
+constexpr int kBwAuxInputToForgetWeightsTensor = 45; // Optional
+constexpr int kBwAuxInputToCellWeightsTensor = 46; // Optional
+constexpr int kBwAuxInputToOutputWeightsTensor = 47; // Optional
+
// Output tensors.
constexpr int kFwOutputTensor = 0;
constexpr int kBwOutputTensor = 1;
@@ -115,14 +128,15 @@ enum TemporaryTensor {
kBwScratchBuffer = 1,
// Quantized tensors needed for the hybrid kernel.
kInputQuantized = 2,
- kFwActivationStateQuantized = 3,
- kBwActivationStateQuantized = 4,
- kFwCellStateQuantized = 5,
- kBwCellStateQuantized = 6,
- kScalingFactors = 7,
- kProductScalingFactors = 8,
- kRecoveredCellWeights = 9,
- kNumTemporaryTensors = 10
+ kAuxInputQuantized = 3, // Quantized tensor needed for auxiliary input.
+ kFwActivationStateQuantized = 4,
+ kBwActivationStateQuantized = 5,
+ kFwCellStateQuantized = 6,
+ kBwCellStateQuantized = 7,
+ kScalingFactors = 8,
+ kProductScalingFactors = 9,
+ kRecoveredCellWeights = 10,
+ kNumTemporaryTensors = 11
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
@@ -335,7 +349,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
int* scratch_tensor_index = reinterpret_cast<int*>(node->user_data);
// Check we have all the inputs and outputs we need.
- TF_LITE_ENSURE_EQ(context, node->inputs->size, 39);
+ TF_LITE_ENSURE_EQ(context, node->inputs->size, 48);
TF_LITE_ENSURE_EQ(context, node->outputs->size, 2);
// Inferring batch size, number of outputs and sequence length and
@@ -366,6 +380,48 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
context, CheckInputTensorDimensions(context, node, n_input, n_fw_output,
n_fw_cell));
+ // Get (optional) auxiliary inputs and weights.
+ const TfLiteTensor* aux_input =
+ GetOptionalInputTensor(context, node, kAuxInputTensor);
+ const TfLiteTensor* fw_aux_input_to_input_weights =
+ GetOptionalInputTensor(context, node, kFwAuxInputToInputWeightsTensor);
+ const TfLiteTensor* fw_aux_input_to_forget_weights =
+ GetOptionalInputTensor(context, node, kFwAuxInputToForgetWeightsTensor);
+ const TfLiteTensor* fw_aux_input_to_cell_weights =
+ GetOptionalInputTensor(context, node, kFwAuxInputToCellWeightsTensor);
+ const TfLiteTensor* fw_aux_input_to_output_weights =
+ GetOptionalInputTensor(context, node, kFwAuxInputToOutputWeightsTensor);
+ const TfLiteTensor* bw_aux_input_to_input_weights =
+ GetOptionalInputTensor(context, node, kBwAuxInputToInputWeightsTensor);
+ const TfLiteTensor* bw_aux_input_to_forget_weights =
+ GetOptionalInputTensor(context, node, kBwAuxInputToForgetWeightsTensor);
+ const TfLiteTensor* bw_aux_input_to_cell_weights =
+ GetOptionalInputTensor(context, node, kBwAuxInputToCellWeightsTensor);
+ const TfLiteTensor* bw_aux_input_to_output_weights =
+ GetOptionalInputTensor(context, node, kBwAuxInputToOutputWeightsTensor);
+
+ const bool aux_inputs_all_or_none =
+ ((aux_input != nullptr) && (fw_aux_input_to_cell_weights != nullptr) &&
+ (fw_aux_input_to_forget_weights != nullptr) &&
+ (fw_aux_input_to_output_weights != nullptr) &&
+ (bw_aux_input_to_cell_weights != nullptr) &&
+ (bw_aux_input_to_forget_weights != nullptr) &&
+ (bw_aux_input_to_output_weights != nullptr)) ||
+ ((fw_aux_input_to_cell_weights == nullptr) &&
+ (fw_aux_input_to_forget_weights == nullptr) &&
+ (fw_aux_input_to_output_weights == nullptr) &&
+ (bw_aux_input_to_cell_weights == nullptr) &&
+ (bw_aux_input_to_forget_weights == nullptr) &&
+ (bw_aux_input_to_output_weights == nullptr));
+ TF_LITE_ENSURE(context, aux_inputs_all_or_none);
+ const bool has_aux_input = (aux_input != nullptr);
+
+ if (has_aux_input) {
+ // Check that aux_input has the same dimensions (except last) as the input.
+ TF_LITE_ASSERT_EQ(aux_input->dims->data[0], input->dims->data[0]);
+ TF_LITE_ASSERT_EQ(aux_input->dims->data[1], input->dims->data[1]);
+ }
+
// Get the pointer to output, activation_state and cell_state buffer tensors.
TfLiteTensor* fw_output = GetOutput(context, node, kFwOutputTensor);
TfLiteTensor* fw_activation_state =
@@ -406,6 +462,10 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* fw_input_to_input_weights =
GetOptionalInputTensor(context, node, kFwInputToInputWeightsTensor);
+ if (has_aux_input) {
+ TF_LITE_ENSURE_EQ(context, fw_aux_input_to_input_weights->dims->data[0],
+ fw_input_to_input_weights->dims->data[0]);
+ }
const bool fw_use_cifg = (fw_input_to_input_weights == nullptr);
TfLiteIntArray* fw_scratch_buffer_size = TfLiteIntArrayCreate(2);
fw_scratch_buffer_size->data[0] = n_batch;
@@ -470,6 +530,10 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* bw_input_to_input_weights =
GetOptionalInputTensor(context, node, kBwInputToInputWeightsTensor);
+ if (has_aux_input) {
+ TF_LITE_ENSURE_EQ(context, bw_aux_input_to_input_weights->dims->data[0],
+ bw_input_to_input_weights->dims->data[0]);
+ }
const bool bw_use_cifg = (bw_input_to_input_weights == nullptr);
TfLiteIntArray* bw_scratch_buffer_size = TfLiteIntArrayCreate(2);
bw_scratch_buffer_size->data[0] = n_batch;
@@ -483,8 +547,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, bw_scratch_buffer,
bw_scratch_buffer_size));
if (is_hybrid_op) {
- // Allocate temporary tensors to store quantized values of input,
- // output_state and cell_state tensors.
+ // Allocate temporary tensors to store quantized values of input, aux_input
+ // (if present), activation_state and cell_state tensors.
node->temporaries->data[kInputQuantized] =
*scratch_tensor_index + kInputQuantized;
TfLiteTensor* input_quantized =
@@ -497,6 +561,22 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
input_quantized_size));
}
+ if (has_aux_input) {
+ node->temporaries->data[kAuxInputQuantized] =
+ *scratch_tensor_index + kAuxInputQuantized;
+ TfLiteTensor* aux_input_quantized =
+ GetTemporary(context, node, kAuxInputQuantized);
+ aux_input_quantized->type = kTfLiteUInt8;
+ aux_input_quantized->allocation_type = kTfLiteArenaRw;
+ if (!TfLiteIntArrayEqual(aux_input_quantized->dims, aux_input->dims)) {
+ TfLiteIntArray* aux_input_quantized_size =
+ TfLiteIntArrayCopy(aux_input->dims);
+ TF_LITE_ENSURE_OK(context,
+ context->ResizeTensor(context, aux_input_quantized,
+ aux_input_quantized_size));
+ }
+ }
+
node->temporaries->data[kFwActivationStateQuantized] =
*scratch_tensor_index + kFwActivationStateQuantized;
TfLiteTensor* fw_activation_state_quantized =
@@ -617,7 +697,11 @@ TfLiteStatus EvalFloat(
const TfLiteTensor* recurrent_to_output_weights,
const TfLiteTensor* cell_to_input_weights,
const TfLiteTensor* cell_to_forget_weights,
- const TfLiteTensor* cell_to_output_weights,
+ const TfLiteTensor* cell_to_output_weights, const TfLiteTensor* aux_input,
+ const TfLiteTensor* aux_input_to_input_weights,
+ const TfLiteTensor* aux_input_to_forget_weights,
+ const TfLiteTensor* aux_input_to_cell_weights,
+ const TfLiteTensor* aux_input_to_output_weights,
const TfLiteTensor* input_gate_bias, const TfLiteTensor* forget_gate_bias,
const TfLiteTensor* cell_bias, const TfLiteTensor* output_gate_bias,
const TfLiteTensor* projection_weights, const TfLiteTensor* projection_bias,
@@ -627,6 +711,7 @@ TfLiteStatus EvalFloat(
const int max_time = input->dims->data[0];
const int n_batch = input->dims->data[1];
const int n_input = input->dims->data[2];
+ const int aux_input_size = (aux_input) ? aux_input->dims->data[2] : 0;
// n_cell and n_output will be the same size when there is no projection.
const int n_cell = input_to_output_weights->dims->data[0];
@@ -671,25 +756,41 @@ TfLiteStatus EvalFloat(
const float* projection_bias_ptr =
(projection_bias == nullptr) ? nullptr : projection_bias->data.f;
+ float* aux_input_ptr = nullptr;
+ float* aux_input_to_input_weights_ptr = nullptr;
+ float* aux_input_to_forget_weights_ptr = nullptr;
+ float* aux_input_to_cell_weights_ptr = nullptr;
+ float* aux_input_to_output_weights_ptr = nullptr;
+ if (aux_input_size > 0) {
+ aux_input_ptr = aux_input->data.f;
+ aux_input_to_input_weights_ptr = aux_input_to_input_weights->data.f;
+ aux_input_to_forget_weights_ptr = aux_input_to_forget_weights->data.f;
+ aux_input_to_cell_weights_ptr = aux_input_to_cell_weights->data.f;
+ aux_input_to_output_weights_ptr = aux_input_to_output_weights->data.f;
+ }
+
// Loop through the sequence.
if (forward_sequence) {
for (int t = 0; t < max_time; t++) {
const float* input_ptr = input->data.f + t * n_batch * n_input;
float* output_ptr_time = output->data.f + t * n_batch * n_output;
- kernel_utils::LstmStep(
+ kernel_utils::LstmStepWithAuxInput(
input_ptr, input_to_input_weights_ptr,
input_to_forget_weights->data.f, input_to_cell_weights->data.f,
- input_to_output_weights->data.f, recurrent_to_input_weights_ptr,
- recurrent_to_forget_weights->data.f,
+ input_to_output_weights->data.f, aux_input_ptr,
+ aux_input_to_input_weights_ptr, aux_input_to_forget_weights_ptr,
+ aux_input_to_cell_weights_ptr, aux_input_to_output_weights_ptr,
+ recurrent_to_input_weights_ptr, recurrent_to_forget_weights->data.f,
recurrent_to_cell_weights->data.f,
recurrent_to_output_weights->data.f, cell_to_input_weights_ptr,
cell_to_forget_weights_ptr, cell_to_output_weights_ptr,
input_gate_bias_ptr, forget_gate_bias->data.f, cell_bias->data.f,
output_gate_bias->data.f, projection_weights_ptr, projection_bias_ptr,
- params, n_batch, n_cell, n_input, n_output, activation_state->data.f,
- cell_state->data.f, input_gate_scratch, forget_gate_scratch,
- cell_scratch, output_gate_scratch, output_ptr_time);
+ params, n_batch, n_cell, n_input, aux_input_size, n_output,
+ activation_state->data.f, cell_state->data.f, input_gate_scratch,
+ forget_gate_scratch, cell_scratch, output_gate_scratch,
+ output_ptr_time);
}
} else {
// Loop through the sequence backwards.
@@ -697,19 +798,22 @@ TfLiteStatus EvalFloat(
const float* input_ptr = input->data.f + t * n_batch * n_input;
float* output_ptr_time = output->data.f + t * n_batch * n_output;
- kernel_utils::LstmStep(
+ kernel_utils::LstmStepWithAuxInput(
input_ptr, input_to_input_weights_ptr,
input_to_forget_weights->data.f, input_to_cell_weights->data.f,
- input_to_output_weights->data.f, recurrent_to_input_weights_ptr,
- recurrent_to_forget_weights->data.f,
+ input_to_output_weights->data.f, aux_input_ptr,
+ aux_input_to_input_weights_ptr, aux_input_to_forget_weights_ptr,
+ aux_input_to_cell_weights_ptr, aux_input_to_output_weights_ptr,
+ recurrent_to_input_weights_ptr, recurrent_to_forget_weights->data.f,
recurrent_to_cell_weights->data.f,
recurrent_to_output_weights->data.f, cell_to_input_weights_ptr,
cell_to_forget_weights_ptr, cell_to_output_weights_ptr,
input_gate_bias_ptr, forget_gate_bias->data.f, cell_bias->data.f,
output_gate_bias->data.f, projection_weights_ptr, projection_bias_ptr,
- params, n_batch, n_cell, n_input, n_output, activation_state->data.f,
- cell_state->data.f, input_gate_scratch, forget_gate_scratch,
- cell_scratch, output_gate_scratch, output_ptr_time);
+ params, n_batch, n_cell, n_input, aux_input_size, n_output,
+ activation_state->data.f, cell_state->data.f, input_gate_scratch,
+ forget_gate_scratch, cell_scratch, output_gate_scratch,
+ output_ptr_time);
}
}
return kTfLiteOk;
@@ -726,19 +830,25 @@ TfLiteStatus EvalHybrid(
const TfLiteTensor* recurrent_to_output_weights,
const TfLiteTensor* cell_to_input_weights,
const TfLiteTensor* cell_to_forget_weights,
- const TfLiteTensor* cell_to_output_weights,
+ const TfLiteTensor* cell_to_output_weights, const TfLiteTensor* aux_input,
+ const TfLiteTensor* aux_input_to_input_weights,
+ const TfLiteTensor* aux_input_to_forget_weights,
+ const TfLiteTensor* aux_input_to_cell_weights,
+ const TfLiteTensor* aux_input_to_output_weights,
const TfLiteTensor* input_gate_bias, const TfLiteTensor* forget_gate_bias,
const TfLiteTensor* cell_bias, const TfLiteTensor* output_gate_bias,
const TfLiteTensor* projection_weights, const TfLiteTensor* projection_bias,
const TfLiteLSTMParams* params, bool forward_sequence,
TfLiteTensor* scratch_buffer, TfLiteTensor* scaling_factors,
TfLiteTensor* prod_scaling_factors, TfLiteTensor* recovered_cell_weights,
- TfLiteTensor* input_quantized, TfLiteTensor* output_state_quantized,
- TfLiteTensor* cell_state_quantized, TfLiteTensor* output_state,
- TfLiteTensor* cell_state, TfLiteTensor* output) {
+ TfLiteTensor* input_quantized, TfLiteTensor* aux_input_quantized,
+ TfLiteTensor* output_state_quantized, TfLiteTensor* cell_state_quantized,
+ TfLiteTensor* output_state, TfLiteTensor* cell_state,
+ TfLiteTensor* output) {
const int max_time = input->dims->data[0];
const int n_batch = input->dims->data[1];
const int n_input = input->dims->data[2];
+ const int aux_input_size = (aux_input) ? aux_input->dims->data[2] : 0;
// n_cell and n_output will be the same size when there is no projection.
const int n_cell = input_to_output_weights->dims->data[0];
const int n_output = recurrent_to_output_weights->dims->data[1];
@@ -842,6 +952,10 @@ TfLiteStatus EvalHybrid(
// Temporary storage for quantized values and scaling factors.
int8_t* quantized_input_ptr =
reinterpret_cast<int8_t*>(input_quantized->data.uint8);
+ int8_t* quantized_aux_input_ptr =
+ (aux_input_quantized == nullptr)
+ ? nullptr
+ : reinterpret_cast<int8_t*>(aux_input_quantized->data.uint8);
int8_t* quantized_output_state_ptr =
reinterpret_cast<int8_t*>(output_state_quantized->data.uint8);
int8_t* quantized_cell_state_ptr =
@@ -850,31 +964,63 @@ TfLiteStatus EvalHybrid(
float* prod_scaling_factors_ptr = prod_scaling_factors->data.f;
float* recovered_cell_weights_ptr = recovered_cell_weights->data.f;
+ // Auxiliary input and weights.
+ float* aux_input_ptr = nullptr;
+ int8_t* aux_input_to_input_weights_ptr = nullptr;
+ int8_t* aux_input_to_forget_weights_ptr = nullptr;
+ int8_t* aux_input_to_cell_weights_ptr = nullptr;
+ int8_t* aux_input_to_output_weights_ptr = nullptr;
+ float aux_input_to_input_weights_scale = 0.0f;
+ float aux_input_to_forget_weights_scale = 0.0f;
+ float aux_input_to_cell_weights_scale = 0.0f;
+ float aux_input_to_output_weights_scale = 0.0f;
+ if (aux_input_size > 0) {
+ aux_input_ptr = aux_input->data.f;
+ aux_input_to_input_weights_ptr =
+ reinterpret_cast<int8_t*>(aux_input_to_input_weights->data.uint8);
+ aux_input_to_forget_weights_ptr =
+ reinterpret_cast<int8_t*>(aux_input_to_forget_weights->data.uint8);
+ aux_input_to_cell_weights_ptr =
+ reinterpret_cast<int8_t*>(aux_input_to_cell_weights->data.uint8);
+ aux_input_to_output_weights_ptr =
+ reinterpret_cast<int8_t*>(aux_input_to_output_weights->data.uint8);
+ aux_input_to_input_weights_scale = aux_input_to_input_weights->params.scale;
+ aux_input_to_forget_weights_scale =
+ aux_input_to_forget_weights->params.scale;
+ aux_input_to_cell_weights_scale = aux_input_to_cell_weights->params.scale;
+ aux_input_to_output_weights_scale =
+ aux_input_to_output_weights->params.scale;
+ }
if (forward_sequence) {
// Feed the sequence into the LSTM step-by-step.
for (int t = 0; t < max_time; t++) {
const float* input_ptr = input->data.f + t * n_batch * n_input;
float* output_ptr = output->data.f + t * n_batch * n_output;
- kernel_utils::LstmStep(
+ kernel_utils::LstmStepWithAuxInput(
input_ptr, input_to_input_weights_ptr, input_to_input_weights_scale,
input_to_forget_weights_ptr, input_to_forget_weights_scale,
input_to_cell_weights_ptr, input_to_cell_weights_scale,
input_to_output_weights_ptr, input_to_output_weights_scale,
- recurrent_to_input_weights_ptr, recurrent_to_input_weights_scale,
- recurrent_to_forget_weights_ptr, recurrent_to_forget_weights_scale,
- recurrent_to_cell_weights_ptr, recurrent_to_cell_weights_scale,
- recurrent_to_output_weights_ptr, recurrent_to_output_weights_scale,
- cell_to_input_weights_ptr, cell_to_input_weights_scale,
- cell_to_forget_weights_ptr, cell_to_forget_weights_scale,
- cell_to_output_weights_ptr, cell_to_output_weights_scale,
- input_gate_bias_ptr, forget_gate_bias_ptr, cell_bias_ptr,
- output_gate_bias_ptr, projection_weights_ptr,
- projection_weights_scale, projection_bias_ptr, params, n_batch,
- n_cell, n_input, n_output, input_gate_scratch, forget_gate_scratch,
- cell_scratch, output_gate_scratch, scaling_factors_ptr,
- prod_scaling_factors_ptr, recovered_cell_weights_ptr,
- quantized_input_ptr, quantized_output_state_ptr,
+ aux_input_ptr, aux_input_to_input_weights_ptr,
+ aux_input_to_input_weights_scale, aux_input_to_forget_weights_ptr,
+ aux_input_to_forget_weights_scale, aux_input_to_cell_weights_ptr,
+ aux_input_to_cell_weights_scale, aux_input_to_output_weights_ptr,
+ aux_input_to_output_weights_scale, recurrent_to_input_weights_ptr,
+ recurrent_to_input_weights_scale, recurrent_to_forget_weights_ptr,
+ recurrent_to_forget_weights_scale, recurrent_to_cell_weights_ptr,
+ recurrent_to_cell_weights_scale, recurrent_to_output_weights_ptr,
+ recurrent_to_output_weights_scale, cell_to_input_weights_ptr,
+ cell_to_input_weights_scale, cell_to_forget_weights_ptr,
+ cell_to_forget_weights_scale, cell_to_output_weights_ptr,
+ cell_to_output_weights_scale, input_gate_bias_ptr,
+ forget_gate_bias_ptr, cell_bias_ptr, output_gate_bias_ptr,
+ projection_weights_ptr, projection_weights_scale, projection_bias_ptr,
+ params, n_batch, n_cell, n_input, aux_input_size, n_output,
+ input_gate_scratch, forget_gate_scratch, cell_scratch,
+ output_gate_scratch, scaling_factors_ptr, prod_scaling_factors_ptr,
+ recovered_cell_weights_ptr, quantized_input_ptr,
+ quantized_aux_input_ptr, quantized_output_state_ptr,
quantized_cell_state_ptr, output_state_ptr, cell_state_ptr,
output_ptr);
}
@@ -884,25 +1030,30 @@ TfLiteStatus EvalHybrid(
const float* input_ptr = input->data.f + t * n_batch * n_input;
float* output_ptr = output->data.f + t * n_batch * n_output;
- kernel_utils::LstmStep(
+ kernel_utils::LstmStepWithAuxInput(
input_ptr, input_to_input_weights_ptr, input_to_input_weights_scale,
input_to_forget_weights_ptr, input_to_forget_weights_scale,
input_to_cell_weights_ptr, input_to_cell_weights_scale,
input_to_output_weights_ptr, input_to_output_weights_scale,
- recurrent_to_input_weights_ptr, recurrent_to_input_weights_scale,
- recurrent_to_forget_weights_ptr, recurrent_to_forget_weights_scale,
- recurrent_to_cell_weights_ptr, recurrent_to_cell_weights_scale,
- recurrent_to_output_weights_ptr, recurrent_to_output_weights_scale,
- cell_to_input_weights_ptr, cell_to_input_weights_scale,
- cell_to_forget_weights_ptr, cell_to_forget_weights_scale,
- cell_to_output_weights_ptr, cell_to_output_weights_scale,
- input_gate_bias_ptr, forget_gate_bias_ptr, cell_bias_ptr,
- output_gate_bias_ptr, projection_weights_ptr,
- projection_weights_scale, projection_bias_ptr, params, n_batch,
- n_cell, n_input, n_output, input_gate_scratch, forget_gate_scratch,
- cell_scratch, output_gate_scratch, scaling_factors_ptr,
- prod_scaling_factors_ptr, recovered_cell_weights_ptr,
- quantized_input_ptr, quantized_output_state_ptr,
+ aux_input_ptr, aux_input_to_input_weights_ptr,
+ aux_input_to_input_weights_scale, aux_input_to_forget_weights_ptr,
+ aux_input_to_forget_weights_scale, aux_input_to_cell_weights_ptr,
+ aux_input_to_cell_weights_scale, aux_input_to_output_weights_ptr,
+ aux_input_to_output_weights_scale, recurrent_to_input_weights_ptr,
+ recurrent_to_input_weights_scale, recurrent_to_forget_weights_ptr,
+ recurrent_to_forget_weights_scale, recurrent_to_cell_weights_ptr,
+ recurrent_to_cell_weights_scale, recurrent_to_output_weights_ptr,
+ recurrent_to_output_weights_scale, cell_to_input_weights_ptr,
+ cell_to_input_weights_scale, cell_to_forget_weights_ptr,
+ cell_to_forget_weights_scale, cell_to_output_weights_ptr,
+ cell_to_output_weights_scale, input_gate_bias_ptr,
+ forget_gate_bias_ptr, cell_bias_ptr, output_gate_bias_ptr,
+ projection_weights_ptr, projection_weights_scale, projection_bias_ptr,
+ params, n_batch, n_cell, n_input, aux_input_size, n_output,
+ input_gate_scratch, forget_gate_scratch, cell_scratch,
+ output_gate_scratch, scaling_factors_ptr, prod_scaling_factors_ptr,
+ recovered_cell_weights_ptr, quantized_input_ptr,
+ quantized_aux_input_ptr, quantized_output_state_ptr,
quantized_cell_state_ptr, output_state_ptr, cell_state_ptr,
output_ptr);
}
@@ -1004,17 +1155,39 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* bw_projection_bias =
GetOptionalInputTensor(context, node, kBwProjectionBiasTensor);
+ // State tensors.
TfLiteTensor* bw_activation_state =
GetVariableInput(context, node, kBwInputActivationStateTensor);
TfLiteTensor* bw_cell_state =
GetVariableInput(context, node, kBwInputCellStateTensor);
TfLiteTensor* bw_output = GetOutput(context, node, kBwOutputTensor);
+ // Temporary tensors.
TfLiteTensor* fw_scratch_buffer =
GetTemporary(context, node, kFwScratchBuffer);
TfLiteTensor* bw_scratch_buffer =
GetTemporary(context, node, kBwScratchBuffer);
+ // (Optional) auxiliary inputs.
+ const TfLiteTensor* aux_input =
+ GetOptionalInputTensor(context, node, kAuxInputTensor);
+ const TfLiteTensor* fw_aux_input_to_input_weights =
+ GetOptionalInputTensor(context, node, kFwAuxInputToInputWeightsTensor);
+ const TfLiteTensor* fw_aux_input_to_forget_weights =
+ GetOptionalInputTensor(context, node, kFwAuxInputToForgetWeightsTensor);
+ const TfLiteTensor* fw_aux_input_to_cell_weights =
+ GetOptionalInputTensor(context, node, kFwAuxInputToCellWeightsTensor);
+ const TfLiteTensor* fw_aux_input_to_output_weights =
+ GetOptionalInputTensor(context, node, kFwAuxInputToOutputWeightsTensor);
+ const TfLiteTensor* bw_aux_input_to_input_weights =
+ GetOptionalInputTensor(context, node, kBwAuxInputToInputWeightsTensor);
+ const TfLiteTensor* bw_aux_input_to_forget_weights =
+ GetOptionalInputTensor(context, node, kBwAuxInputToForgetWeightsTensor);
+ const TfLiteTensor* bw_aux_input_to_cell_weights =
+ GetOptionalInputTensor(context, node, kBwAuxInputToCellWeightsTensor);
+ const TfLiteTensor* bw_aux_input_to_output_weights =
+ GetOptionalInputTensor(context, node, kBwAuxInputToOutputWeightsTensor);
+
switch (fw_input_to_output_weights->type) {
case kTfLiteFloat32: {
TfLiteStatus fw_pass_status = EvalFloat(
@@ -1023,10 +1196,13 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
fw_recurrent_to_input_weights, fw_recurrent_to_forget_weights,
fw_recurrent_to_cell_weights, fw_recurrent_to_output_weights,
fw_cell_to_input_weights, fw_cell_to_forget_weights,
- fw_cell_to_output_weights, fw_input_gate_bias, fw_forget_gate_bias,
- fw_cell_bias, fw_output_gate_bias, fw_projection_weights,
- fw_projection_bias, params, /*forward_sequence=*/true,
- fw_scratch_buffer, fw_activation_state, fw_cell_state, fw_output);
+ fw_cell_to_output_weights, aux_input, fw_aux_input_to_input_weights,
+ fw_aux_input_to_forget_weights, fw_aux_input_to_cell_weights,
+ fw_aux_input_to_output_weights, fw_input_gate_bias,
+ fw_forget_gate_bias, fw_cell_bias, fw_output_gate_bias,
+ fw_projection_weights, fw_projection_bias, params,
+ /*forward_sequence=*/true, fw_scratch_buffer, fw_activation_state,
+ fw_cell_state, fw_output);
TF_LITE_ENSURE_OK(context, fw_pass_status);
TfLiteStatus bw_pass_status = EvalFloat(
@@ -1035,16 +1211,21 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
bw_recurrent_to_input_weights, bw_recurrent_to_forget_weights,
bw_recurrent_to_cell_weights, bw_recurrent_to_output_weights,
bw_cell_to_input_weights, bw_cell_to_forget_weights,
- bw_cell_to_output_weights, bw_input_gate_bias, bw_forget_gate_bias,
- bw_cell_bias, bw_output_gate_bias, bw_projection_weights,
- bw_projection_bias, params, /*forward_sequence=*/false,
- bw_scratch_buffer, bw_activation_state, bw_cell_state, bw_output);
+ bw_cell_to_output_weights, aux_input, bw_aux_input_to_input_weights,
+ bw_aux_input_to_forget_weights, bw_aux_input_to_cell_weights,
+ bw_aux_input_to_output_weights, bw_input_gate_bias,
+ bw_forget_gate_bias, bw_cell_bias, bw_output_gate_bias,
+ bw_projection_weights, bw_projection_bias, params,
+ /*forward_sequence=*/false, bw_scratch_buffer, bw_activation_state,
+ bw_cell_state, bw_output);
TF_LITE_ENSURE_OK(context, bw_pass_status);
return kTfLiteOk;
}
case kTfLiteUInt8: {
TfLiteTensor* input_quantized =
GetTemporary(context, node, kInputQuantized);
+ TfLiteTensor* aux_input_quantized =
+ GetTemporary(context, node, kAuxInputQuantized);
TfLiteTensor* fw_activation_state_quantized =
GetTemporary(context, node, kFwActivationStateQuantized);
TfLiteTensor* bw_activation_state_quantized =
@@ -1059,19 +1240,23 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
GetTemporary(context, node, kProductScalingFactors);
TfLiteTensor* recovered_cell_weights =
GetTemporary(context, node, kRecoveredCellWeights);
+
TfLiteStatus fw_pass_status = EvalHybrid(
input, fw_input_to_input_weights, fw_input_to_forget_weights,
fw_input_to_cell_weights, fw_input_to_output_weights,
fw_recurrent_to_input_weights, fw_recurrent_to_forget_weights,
fw_recurrent_to_cell_weights, fw_recurrent_to_output_weights,
fw_cell_to_input_weights, fw_cell_to_forget_weights,
- fw_cell_to_output_weights, fw_input_gate_bias, fw_forget_gate_bias,
- fw_cell_bias, fw_output_gate_bias, fw_projection_weights,
- fw_projection_bias, params, /*forward_sequence=*/true,
- fw_scratch_buffer, scaling_factors, prod_scaling_factors,
- recovered_cell_weights, input_quantized,
- fw_activation_state_quantized, fw_cell_state_quantized,
- fw_activation_state, fw_cell_state, fw_output);
+ fw_cell_to_output_weights, aux_input, fw_aux_input_to_input_weights,
+ fw_aux_input_to_forget_weights, fw_aux_input_to_cell_weights,
+ fw_aux_input_to_output_weights, fw_input_gate_bias,
+ fw_forget_gate_bias, fw_cell_bias, fw_output_gate_bias,
+ fw_projection_weights, fw_projection_bias, params,
+ /*forward_sequence=*/true, fw_scratch_buffer, scaling_factors,
+ prod_scaling_factors, recovered_cell_weights, input_quantized,
+ aux_input_quantized, fw_activation_state_quantized,
+ fw_cell_state_quantized, fw_activation_state, fw_cell_state,
+ fw_output);
TF_LITE_ENSURE_OK(context, fw_pass_status);
TfLiteStatus bw_pass_status = EvalHybrid(
@@ -1080,13 +1265,16 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
bw_recurrent_to_input_weights, bw_recurrent_to_forget_weights,
bw_recurrent_to_cell_weights, bw_recurrent_to_output_weights,
bw_cell_to_input_weights, bw_cell_to_forget_weights,
- bw_cell_to_output_weights, bw_input_gate_bias, bw_forget_gate_bias,
- bw_cell_bias, bw_output_gate_bias, bw_projection_weights,
- bw_projection_bias, params, /*forward_sequence=*/false,
- bw_scratch_buffer, scaling_factors, prod_scaling_factors,
- recovered_cell_weights, input_quantized,
- bw_activation_state_quantized, bw_cell_state_quantized,
- bw_activation_state, bw_cell_state, bw_output);
+          bw_cell_to_output_weights, aux_input, bw_aux_input_to_input_weights,
+          bw_aux_input_to_forget_weights, bw_aux_input_to_cell_weights,
+          bw_aux_input_to_output_weights, bw_input_gate_bias,
+ bw_forget_gate_bias, bw_cell_bias, bw_output_gate_bias,
+ bw_projection_weights, bw_projection_bias, params,
+ /*forward_sequence=*/false, bw_scratch_buffer, scaling_factors,
+ prod_scaling_factors, recovered_cell_weights, input_quantized,
+ aux_input_quantized, bw_activation_state_quantized,
+ bw_cell_state_quantized, bw_activation_state, bw_cell_state,
+ bw_output);
TF_LITE_ENSURE_OK(context, bw_pass_status);
return kTfLiteOk;
}
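
The Prepare() change above enforces an all-or-none contract on the new optional auxiliary weight tensors (the *_to_input variants are excluded because they are already optional under CIFG). A compact, illustrative restatement of that rule as a stand-alone helper; the kernel itself inlines the equivalent boolean expression rather than calling anything like this:

#include <initializer_list>

#include "tensorflow/contrib/lite/context.h"

namespace {

// True when the given optional tensors are either all present or all absent,
// mirroring the aux_inputs_all_or_none check in Prepare().
bool AllPresentOrAllAbsent(std::initializer_list<const TfLiteTensor*> tensors) {
  bool any_present = false;
  bool all_present = true;
  for (const TfLiteTensor* t : tensors) {
    any_present = any_present || (t != nullptr);
    all_present = all_present && (t != nullptr);
  }
  return all_present || !any_present;
}

}  // namespace
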
diff --git a/tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm_test.cc b/tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm_test.cc
index d058fab529..74ba8021c2 100644
--- a/tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm_test.cc
+++ b/tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm_test.cc
@@ -177,6 +177,16 @@ class BidirectionalLSTMOpModel : public SingleOpModel {
bw_output_ = AddOutput(TensorType_FLOAT32);
+ aux_input_ = AddNullInput();
+ fw_aux_input_to_input_weights_ = AddNullInput();
+ fw_aux_input_to_forget_weights_ = AddNullInput();
+ fw_aux_input_to_cell_weights_ = AddNullInput();
+ fw_aux_input_to_output_weights_ = AddNullInput();
+ bw_aux_input_to_input_weights_ = AddNullInput();
+ bw_aux_input_to_forget_weights_ = AddNullInput();
+ bw_aux_input_to_cell_weights_ = AddNullInput();
+ bw_aux_input_to_output_weights_ = AddNullInput();
+
SetBuiltinOp(BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM,
BuiltinOptions_LSTMOptions,
CreateLSTMOptions(builder_, ActivationFunctionType_TANH,
@@ -340,6 +350,16 @@ class BidirectionalLSTMOpModel : public SingleOpModel {
int fw_output_;
int bw_output_;
+ int aux_input_;
+ int fw_aux_input_to_input_weights_;
+ int fw_aux_input_to_forget_weights_;
+ int fw_aux_input_to_cell_weights_;
+ int fw_aux_input_to_output_weights_;
+ int bw_aux_input_to_input_weights_;
+ int bw_aux_input_to_forget_weights_;
+ int bw_aux_input_to_cell_weights_;
+ int bw_aux_input_to_output_weights_;
+
int n_batch_;
int n_input_;
int n_fw_cell_;
@@ -415,6 +435,16 @@ TEST(LSTMOpTest, BlackBoxTestNoCifgNoPeepholeNoProjectionNoClipping) {
{n_batch, n_output}, // activation_state tensor
{n_batch, n_cell}, // cell_state tensor
+
+ {n_batch, sequence_length, 0}, // aux_input tensor
+ {n_cell, 0}, // aux_fw_input_to_input tensor
+ {n_cell, 0}, // aux_fw_input_to_forget tensor
+ {n_cell, 0}, // aux_fw_input_to_cell tensor
+ {n_cell, 0}, // aux_fw_input_to_output tensor
+ {n_cell, 0}, // aux_bw_input_to_input tensor
+ {n_cell, 0}, // aux_bw_input_to_forget tensor
+ {n_cell, 0}, // aux_bw_input_to_cell tensor
+ {n_cell, 0}, // aux_bw_input_to_output tensor
});
lstm.SetInputToInputWeights({-0.45018822, -0.02338299, -0.0870589,
@@ -562,6 +592,16 @@ TEST(LSTMOpTest, BlackBoxTestNoCifgNoPeepholeNoProjectionNoClippingReverse) {
{n_batch, n_output}, // activation_state tensor
{n_batch, n_cell}, // cell_state tensor
+
+ {n_batch, sequence_length, 0}, // aux_input tensor
+ {n_cell, 0}, // aux_fw_input_to_input tensor
+ {n_cell, 0}, // aux_fw_input_to_forget tensor
+ {n_cell, 0}, // aux_fw_input_to_cell tensor
+ {n_cell, 0}, // aux_fw_input_to_output tensor
+ {n_cell, 0}, // aux_bw_input_to_input tensor
+ {n_cell, 0}, // aux_bw_input_to_forget tensor
+ {n_cell, 0}, // aux_bw_input_to_cell tensor
+ {n_cell, 0}, // aux_bw_input_to_output tensor
});
lstm.SetInputToInputWeights({-0.45018822, -0.02338299, -0.0870589,
@@ -709,6 +749,16 @@ TEST(LSTMOpTest, BlackBoxTestWithCifgWithPeepholeNoProjectionNoClipping) {
{n_batch, n_output}, // activation_state tensor
{n_batch, n_cell}, // cell_state tensor
+
+ {n_batch, sequence_length, 0}, // aux_input tensor
+ {n_cell, 0}, // aux_fw_input_to_input tensor
+ {n_cell, 0}, // aux_fw_input_to_forget tensor
+ {n_cell, 0}, // aux_fw_input_to_cell tensor
+ {n_cell, 0}, // aux_fw_input_to_output tensor
+ {n_cell, 0}, // aux_bw_input_to_input tensor
+ {n_cell, 0}, // aux_bw_input_to_forget tensor
+ {n_cell, 0}, // aux_bw_input_to_cell tensor
+ {n_cell, 0}, // aux_bw_input_to_output tensor
});
lstm.SetInputToCellWeights({-0.49770179, -0.27711356, -0.09624726, 0.05100781,
@@ -848,6 +898,16 @@ TEST(LSTMOpTest,
{n_batch, n_output}, // activation_state tensor
{n_batch, n_cell}, // cell_state tensor
+
+ {n_batch, sequence_length, 0}, // aux_input tensor
+ {n_cell, 0}, // aux_fw_input_to_input tensor
+ {n_cell, 0}, // aux_fw_input_to_forget tensor
+ {n_cell, 0}, // aux_fw_input_to_cell tensor
+ {n_cell, 0}, // aux_fw_input_to_output tensor
+ {n_cell, 0}, // aux_bw_input_to_input tensor
+ {n_cell, 0}, // aux_bw_input_to_forget tensor
+ {n_cell, 0}, // aux_bw_input_to_cell tensor
+ {n_cell, 0}, // aux_bw_input_to_output tensor
});
lstm.SetInputToCellWeights({-0.49770179, -0.27711356, -0.09624726, 0.05100781,
@@ -987,6 +1047,16 @@ TEST(LSTMOpTest, BlackBoxTestWithPeepholeWithProjectionNoClipping) {
{n_batch, n_output}, // activation_state tensor
{n_batch, n_cell}, // cell_state tensor
+
+ {n_batch, sequence_length, 0}, // aux_input tensor
+ {n_cell, 0}, // aux_fw_input_to_input tensor
+ {n_cell, 0}, // aux_fw_input_to_forget tensor
+ {n_cell, 0}, // aux_fw_input_to_cell tensor
+ {n_cell, 0}, // aux_fw_input_to_output tensor
+ {n_cell, 0}, // aux_bw_input_to_input tensor
+ {n_cell, 0}, // aux_bw_input_to_forget tensor
+ {n_cell, 0}, // aux_bw_input_to_cell tensor
+ {n_cell, 0}, // aux_bw_input_to_output tensor
});
lstm.SetInputToInputWeights(
diff --git a/tensorflow/contrib/lite/kernels/eigen_support.h b/tensorflow/contrib/lite/kernels/eigen_support.h
index ec77856b10..b235829642 100644
--- a/tensorflow/contrib/lite/kernels/eigen_support.h
+++ b/tensorflow/contrib/lite/kernels/eigen_support.h
@@ -18,7 +18,7 @@ limitations under the License.
#include "tensorflow/contrib/lite/context.h"
namespace EigenForTFLite {
-class ThreadPoolDevice;
+struct ThreadPoolDevice;
}
namespace tflite {
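
The eigen_support.h change is a tag-consistency fix: the type is defined as a struct, so forward-declaring it with class trips Clang's -Wmismatched-tags. Illustrative fragment only, showing the consistent declaration:

// Forward declaration whose class-key matches the definition
// (struct ThreadPoolDevice). Using `class` here still compiles, but Clang
// warns that the type was previously declared with a different tag.
namespace EigenForTFLite {
struct ThreadPoolDevice;
}  // namespace EigenForTFLite
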
diff --git a/tensorflow/contrib/lite/kernels/internal/kernel_utils.cc b/tensorflow/contrib/lite/kernels/internal/kernel_utils.cc
index 360b472c45..b9dd40ddf9 100644
--- a/tensorflow/contrib/lite/kernels/internal/kernel_utils.cc
+++ b/tensorflow/contrib/lite/kernels/internal/kernel_utils.cc
@@ -203,9 +203,9 @@ void LstmStep(
cell_to_input_weights_ptr, cell_to_forget_weights_ptr,
cell_to_output_weights_ptr, input_gate_bias_ptr, forget_gate_bias_ptr,
cell_bias_ptr, output_gate_bias_ptr, projection_weights_ptr,
- projection_bias_ptr, params, n_batch, n_cell, n_input, n_output,
- output_state_ptr, cell_state_ptr, input_gate_scratch, forget_gate_scratch,
- cell_scratch, output_gate_scratch, output_ptr_batch);
+ projection_bias_ptr, params, n_batch, n_cell, n_input, /*n_aux_input=*/0,
+ n_output, output_state_ptr, cell_state_ptr, input_gate_scratch,
+ forget_gate_scratch, cell_scratch, output_gate_scratch, output_ptr_batch);
}
void LstmStepWithAuxInput(
@@ -227,8 +227,8 @@ void LstmStepWithAuxInput(
const float* forget_gate_bias_ptr, const float* cell_bias_ptr,
const float* output_gate_bias_ptr, const float* projection_weights_ptr,
const float* projection_bias_ptr, const TfLiteLSTMParams* params,
- int n_batch, int n_cell, int n_input, int n_output, float* output_state_ptr,
- float* cell_state_ptr, float* input_gate_scratch,
+ int n_batch, int n_cell, int n_input, int n_aux_input, int n_output,
+ float* output_state_ptr, float* cell_state_ptr, float* input_gate_scratch,
float* forget_gate_scratch, float* cell_scratch, float* output_gate_scratch,
float* output_ptr_batch) {
// Since we have already checked that weights are all there or none, we can
@@ -268,19 +268,20 @@ void LstmStepWithAuxInput(
if (aux_input_ptr_batch != nullptr) {
if (!use_cifg) {
tensor_utils::MatrixBatchVectorMultiplyAccumulate(
- aux_input_to_input_weights_ptr, n_cell, n_input, aux_input_ptr_batch,
- n_batch, input_gate_scratch, /*result_stride=*/1);
+ aux_input_to_input_weights_ptr, n_cell, n_aux_input,
+ aux_input_ptr_batch, n_batch, input_gate_scratch,
+ /*result_stride=*/1);
}
tensor_utils::MatrixBatchVectorMultiplyAccumulate(
- aux_input_to_forget_weights_ptr, n_cell, n_input, aux_input_ptr_batch,
- n_batch, forget_gate_scratch, /*result_stride=*/1);
+ aux_input_to_forget_weights_ptr, n_cell, n_aux_input,
+ aux_input_ptr_batch, n_batch, forget_gate_scratch, /*result_stride=*/1);
tensor_utils::MatrixBatchVectorMultiplyAccumulate(
- aux_input_to_cell_weights_ptr, n_cell, n_input, aux_input_ptr_batch,
+ aux_input_to_cell_weights_ptr, n_cell, n_aux_input, aux_input_ptr_batch,
n_batch, cell_scratch, /*result_stride=*/1);
tensor_utils::MatrixBatchVectorMultiplyAccumulate(
- aux_input_to_output_weights_ptr, n_cell, n_input, aux_input_ptr_batch,
- n_batch, output_gate_scratch, /*result_stride=*/1);
+ aux_input_to_output_weights_ptr, n_cell, n_aux_input,
+ aux_input_ptr_batch, n_batch, output_gate_scratch, /*result_stride=*/1);
}
// For each batch and cell: compute recurrent_weight * output_state.
@@ -432,10 +433,11 @@ void LstmStep(
cell_to_output_weights_ptr, cell_to_output_weights_scale,
input_gate_bias_ptr, forget_gate_bias_ptr, cell_bias_ptr,
output_gate_bias_ptr, projection_weights_ptr, projection_weights_scale,
- projection_bias_ptr, params, n_batch, n_cell, n_input, n_output,
- input_gate_scratch, forget_gate_scratch, cell_scratch,
- output_gate_scratch, scaling_factors, product_scaling_factors,
- recovered_cell_weights, quantized_input_ptr_batch,
+ projection_bias_ptr, params, n_batch, n_cell, n_input,
+ /*n_aux_input=*/0, n_output, input_gate_scratch, forget_gate_scratch,
+ cell_scratch, output_gate_scratch, scaling_factors,
+ product_scaling_factors, recovered_cell_weights,
+ quantized_input_ptr_batch,
/*quantized_aux_input_ptr_batch=*/nullptr, quantized_output_state_ptr,
quantized_cell_state_ptr, output_state_ptr, cell_state_ptr,
output_ptr_batch);
@@ -476,8 +478,9 @@ void LstmStep(
const float* output_gate_bias_ptr, const int8_t* projection_weights_ptr,
float projection_weights_scale, const float* projection_bias_ptr,
const TfLiteLSTMParams* params, int n_batch, int n_cell, int n_input,
- int n_output, float* input_gate_scratch, float* forget_gate_scratch,
- float* cell_scratch, float* output_gate_scratch, float* scaling_factors,
+ int n_aux_input, int n_output, float* input_gate_scratch,
+ float* forget_gate_scratch, float* cell_scratch,
+ float* output_gate_scratch, float* scaling_factors,
float* product_scaling_factors, float* recovered_cell_weights,
int8_t* quantized_input_ptr_batch,
int8_t* quantized_aux_input_ptr_batch,
diff --git a/tensorflow/contrib/lite/kernels/internal/kernel_utils.h b/tensorflow/contrib/lite/kernels/internal/kernel_utils.h
index 38436c1382..215ad04add 100644
--- a/tensorflow/contrib/lite/kernels/internal/kernel_utils.h
+++ b/tensorflow/contrib/lite/kernels/internal/kernel_utils.h
@@ -131,8 +131,8 @@ void LstmStepWithAuxInput(
const float* forget_gate_bias_ptr, const float* cell_bias_ptr,
const float* output_gate_bias_ptr, const float* projection_weights_ptr,
const float* projection_bias_ptr, const TfLiteLSTMParams* params,
- int n_batch, int n_cell, int n_input, int n_output, float* output_state_ptr,
- float* cell_state_ptr, float* input_gate_scratch,
+ int n_batch, int n_cell, int n_input, int n_aux_input, int n_output,
+ float* output_state_ptr, float* cell_state_ptr, float* input_gate_scratch,
float* forget_gate_scratch, float* cell_scratch, float* output_gate_scratch,
float* output_ptr_batch);
@@ -252,12 +252,13 @@ void LstmStepWithAuxInput(
const float* output_gate_bias_ptr, const int8_t* projection_weights_ptr,
float projection_weights_scale, const float* projection_bias_ptr,
const TfLiteLSTMParams* params, int n_batch, int n_cell, int n_input,
- int n_output, float* input_gate_scratch, float* forget_gate_scratch,
- float* cell_scratch, float* output_gate_scratch, float* scaling_factors,
- float* product_scaling_factors, float* recovered_cell_weights,
- int8_t* quantized_input_ptr_batch, int8_t* quantized_aux_input_ptr_batch,
- int8_t* quantized_output_state_ptr, int8_t* quantized_cell_state_ptr,
- float* output_state_ptr, float* cell_state_ptr, float* output_ptr_batch);
+ int n_aux_input, int n_output, float* input_gate_scratch,
+ float* forget_gate_scratch, float* cell_scratch, float* output_gate_scratch,
+ float* scaling_factors, float* product_scaling_factors,
+ float* recovered_cell_weights, int8_t* quantized_input_ptr_batch,
+ int8_t* quantized_aux_input_ptr_batch, int8_t* quantized_output_state_ptr,
+ int8_t* quantized_cell_state_ptr, float* output_state_ptr,
+ float* cell_state_ptr, float* output_ptr_batch);
} // namespace kernel_utils
} // namespace tflite
diff --git a/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h b/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
index 00f9616cc2..a027a47726 100644
--- a/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
+++ b/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
@@ -3398,10 +3398,12 @@ inline void Tanh(const int16* input_data, const RuntimeShape& input_shape,
}
}
-inline void Dequantize(const uint8* input_data, const Dims<4>& input_dims,
- int32 zero_point, double scale, float* output_data,
- const Dims<4>& output_dims) {
- const int flat_size = MatchingFlatSize(output_dims, input_dims);
+inline void Dequantize(const tflite::DequantizationParams& op_params,
+ const RuntimeShape& input_shape, const uint8* input_data,
+ const RuntimeShape& output_shape, float* output_data) {
+ int32 zero_point = op_params.zero_point;
+ double scale = op_params.scale;
+ const int flat_size = MatchingFlatSize(input_shape, output_shape);
for (int i = 0; i < flat_size; i++) {
int32 val = input_data[i];
@@ -3410,9 +3412,25 @@ inline void Dequantize(const uint8* input_data, const Dims<4>& input_dims,
}
}
-inline void FakeQuant(const float* input_data, const Dims<4>& input_dims,
- float rmin, float rmax, int num_bits, float* output_data,
- const Dims<4>& output_dims) {
+// TODO(b/80418076): Move to legacy ops file, update invocations.
+// Legacy Dims<4>.
+inline void Dequantize(const uint8* input_data, const Dims<4>& input_dims,
+ int32 zero_point, double scale, float* output_data,
+ const Dims<4>& output_dims) {
+ tflite::DequantizationParams op_params;
+ op_params.zero_point = zero_point;
+ op_params.scale = scale;
+
+ Dequantize(op_params, DimsToShape(input_dims), input_data,
+ DimsToShape(output_dims), output_data);
+}
+
+inline void FakeQuant(const tflite::FakeQuantParams& op_params,
+ const RuntimeShape& input_shape, const float* input_data,
+ const RuntimeShape& output_shape, float* output_data) {
+ float rmin = op_params.minmax.min;
+ float rmax = op_params.minmax.max;
+ int num_bits = op_params.num_bits;
// 0 should always be a representable value. Let's assume that the initial
// min,max range contains 0.
TFLITE_DCHECK_LE(rmin, 0.0f);
@@ -3425,11 +3443,25 @@ inline void FakeQuant(const float* input_data, const Dims<4>& input_dims,
float nudged_min, nudged_max, nudged_scale;
NudgeQuantizationRange(rmin, rmax, quant_min, quant_max, &nudged_min,
&nudged_max, &nudged_scale);
- const int flat_size = MatchingFlatSize(output_dims, input_dims);
+ const int flat_size = MatchingFlatSize(input_shape, output_shape);
FakeQuantizeArray(nudged_scale, nudged_min, nudged_max, input_data,
output_data, flat_size);
}
+// TODO(b/80418076): Move to legacy ops file, update invocations.
+// Legacy Dims<4>.
+inline void FakeQuant(const float* input_data, const Dims<4>& input_dims,
+ float rmin, float rmax, int num_bits, float* output_data,
+ const Dims<4>& output_dims) {
+ tflite::FakeQuantParams op_params;
+ op_params.num_bits = num_bits;
+ op_params.minmax.min = rmin;
+ op_params.minmax.max = rmax;
+
+ FakeQuant(op_params, DimsToShape(input_dims), input_data,
+ DimsToShape(output_dims), output_data);
+}
+
template <typename SrcT, typename DstT>
inline void Cast(const RuntimeShape& input_shape, const SrcT* input_data,
const RuntimeShape& output_shape, DstT* output_data) {
@@ -4050,22 +4082,32 @@ inline bool Mean(const T* input_data, const int* input_dims,
}
template <typename T>
-inline void Mean(const T* input_data, const Dims<4>& input_dims,
- const std::vector<int>& reduction_indices, T* output_data,
- const Dims<4>& output_dims) {
- const int output_batch = ArraySize(output_dims, 3);
- const int output_height = ArraySize(output_dims, 2);
- const int output_width = ArraySize(output_dims, 1);
- const int output_depth = ArraySize(output_dims, 0);
+inline void Mean(const tflite::MeanParams& op_params,
+ const RuntimeShape& unextended_input_shape,
+ const T* input_data,
+ const RuntimeShape& unextended_output_shape, T* output_data) {
+ gemmlowp::ScopedProfilingLabel label("Mean");
- const int input_height = ArraySize(input_dims, 2);
- const int input_width = ArraySize(input_dims, 1);
+ TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4);
+ TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4);
+ RuntimeShape input_shape =
+ RuntimeShape::ExtendedShape(4, unextended_input_shape);
+ RuntimeShape output_shape =
+ RuntimeShape::ExtendedShape(4, unextended_output_shape);
+
+ const int output_batch = output_shape.Dims(0);
+ const int output_height = output_shape.Dims(1);
+ const int output_width = output_shape.Dims(2);
+ const int output_depth = output_shape.Dims(3);
+
+ const int input_height = input_shape.Dims(1);
+ const int input_width = input_shape.Dims(2);
// The current implementation only supports simultaneous reduction over
// width and height.
- TFLITE_DCHECK_EQ(reduction_indices.size(), 2);
- TFLITE_DCHECK((reduction_indices[0] == 1 && reduction_indices[1] == 2) ||
- (reduction_indices[0] == 2 && reduction_indices[1] == 1));
+ TFLITE_DCHECK_EQ(op_params.axis_count, 2);
+ TFLITE_DCHECK((op_params.axis[0] == 1 && op_params.axis[1] == 2) ||
+ (op_params.axis[0] == 2 && op_params.axis[1] == 1));
TFLITE_DCHECK_EQ(output_height, 1);
TFLITE_DCHECK_EQ(output_width, 1);
@@ -4074,15 +4116,31 @@ inline void Mean(const T* input_data, const Dims<4>& input_dims,
float value = 0;
for (int in_h = 0; in_h < input_height; ++in_h) {
for (int in_w = 0; in_w < input_width; ++in_w) {
- value += input_data[Offset(input_dims, out_d, in_w, in_h, out_b)];
+ value += input_data[Offset(input_shape, out_b, in_h, in_w, out_d)];
}
}
- output_data[Offset(output_dims, out_d, 0, 0, out_b)] =
+ output_data[Offset(output_shape, out_b, 0, 0, out_d)] =
value / (input_width * input_height);
}
}
}
+// TODO(b/80418076): Move to legacy ops file, update invocations.
+// Legacy Dims<4>.
+template <typename T>
+inline void Mean(const T* input_data, const Dims<4>& input_dims,
+ const std::vector<int>& reduction_indices, T* output_data,
+ const Dims<4>& output_dims) {
+ tflite::MeanParams op_params;
+ op_params.axis_count = reduction_indices.size();
+ for (int i = 0; i < op_params.axis_count; ++i) {
+ op_params.axis[i] = reduction_indices[op_params.axis_count - 1 - i];
+ }
+
+ Mean(op_params, DimsToShape(input_dims), input_data, DimsToShape(output_dims),
+ output_data);
+}
+
// Computes the mean of elements across dimensions given in axis.
// It does so in two stages, first calculates the sum of elements along the axis
// then divides it by the number of element in axis for quantized values.
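The reference-op hunks above migrate `Dequantize`, `FakeQuant`, and `Mean` from loose `Dims<4>` arguments to a params struct plus `RuntimeShape`, keeping the old signatures as thin legacy wrappers. A hedged call-site sketch of the new `Mean` convention follows; the tensor shapes, the `MeanExample` helper, and the initializer-list `RuntimeShape` construction are illustrative assumptions, not code from this change.

```cpp
#include "tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/contrib/lite/kernels/internal/types.h"

// Hypothetical call site: reduce a 1x4x4x8 float tensor over height and width
// (axes 1 and 2), the only reduction the reference Mean above supports.
void MeanExample(const float* input, float* output) {
  const tflite::RuntimeShape input_shape({1, 4, 4, 8});
  const tflite::RuntimeShape output_shape({1, 1, 1, 8});

  tflite::MeanParams op_params;
  op_params.axis_count = 2;
  op_params.axis[0] = 1;  // reduce over height
  op_params.axis[1] = 2;  // reduce over width

  tflite::reference_ops::Mean(op_params, input_shape, input,
                              output_shape, output);
}
```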
diff --git a/tensorflow/contrib/lite/kernels/internal/types.h b/tensorflow/contrib/lite/kernels/internal/types.h
index 9f6e74a267..c4c7cf3842 100644
--- a/tensorflow/contrib/lite/kernels/internal/types.h
+++ b/tensorflow/contrib/lite/kernels/internal/types.h
@@ -769,6 +769,11 @@ struct DepthwiseParams {
int32 output_activation_max;
};
+struct DequantizationParams {
+ double scale;
+ int32 zero_point;
+};
+
struct FakeQuantParams {
MinMax minmax;
int32 num_bits;
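The new `DequantizationParams` struct carries the affine mapping applied by `Dequantize` above, i.e. `output = scale * (quantized_value - zero_point)`. A small sketch of the new calling convention; the shapes, data, zero point, and scale below are made-up illustrative values.

```cpp
#include <cstdint>

#include "tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/contrib/lite/kernels/internal/types.h"

// Hypothetical call site: dequantize a 1x2x2x1 uint8 tensor with the new
// params-struct signature; the legacy Dims<4> overload above forwards here.
void DequantizeExample() {
  const tflite::RuntimeShape shape({1, 2, 2, 1});
  const uint8_t input[4] = {0, 64, 128, 255};
  float output[4];

  tflite::DequantizationParams op_params;
  op_params.zero_point = 128;   // illustrative zero point
  op_params.scale = 0.0078125;  // illustrative scale (1/128)

  tflite::reference_ops::Dequantize(op_params, shape, input, shape, output);
}
```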
diff --git a/tensorflow/contrib/lite/nnapi_delegate.cc b/tensorflow/contrib/lite/nnapi_delegate.cc
index 602f3ee5d2..484842713d 100644
--- a/tensorflow/contrib/lite/nnapi_delegate.cc
+++ b/tensorflow/contrib/lite/nnapi_delegate.cc
@@ -64,6 +64,14 @@ void logError(const char* format, ...) {
__LINE__); \
}
+#define RETURN_ERROR_IF_TFLITE_FAILED(x) \
+ if (x != kTfLiteOk) { \
+ logError( \
+ "Returning error since TFLite returned failure nnapi_delegate.cc:%d.", \
+ __LINE__); \
+ return kTfLiteError; \
+ }
+
#define RETURN_ERROR_IF_NN_FAILED(x) \
if (x != ANEURALNETWORKS_NO_ERROR) { \
logError( \
@@ -299,17 +307,21 @@ TfLiteStatus AddOpsAndParams(
};
auto check_and_add_activation = [&add_scalar_int32](int activation) {
if (activation > kTfLiteActRelu6) {
- FATAL("NNAPI only supports RELU, RELU1 and RELU6 activations");
+ logError("NNAPI only supports RELU, RELU1 and RELU6 activations");
+ return kTfLiteError;
}
add_scalar_int32(activation);
+ return kTfLiteOk;
};
auto add_add_params = [&add_scalar_int32](void* data) {
auto* builtin = reinterpret_cast<TfLiteAddParams*>(data);
if (builtin->activation > kTfLiteActRelu6) {
- FATAL("NNAPI only supports RELU, RELU1 and RELU6 activations");
+ logError("NNAPI only supports RELU, RELU1 and RELU6 activations");
+ return kTfLiteError;
}
add_scalar_int32(builtin->activation);
+ return kTfLiteOk;
};
auto add_pooling_params = [&add_scalar_int32,
@@ -320,7 +332,7 @@ TfLiteStatus AddOpsAndParams(
add_scalar_int32(builtin->stride_height);
add_scalar_int32(builtin->filter_width);
add_scalar_int32(builtin->filter_height);
- check_and_add_activation(builtin->activation);
+ return check_and_add_activation(builtin->activation);
};
auto add_convolution_params = [&add_scalar_int32,
@@ -329,7 +341,7 @@ TfLiteStatus AddOpsAndParams(
add_scalar_int32(builtin->padding);
add_scalar_int32(builtin->stride_width);
add_scalar_int32(builtin->stride_height);
- check_and_add_activation(builtin->activation);
+ return check_and_add_activation(builtin->activation);
};
auto add_depthwise_conv_params = [&add_scalar_int32,
@@ -339,20 +351,22 @@ TfLiteStatus AddOpsAndParams(
add_scalar_int32(builtin->stride_width);
add_scalar_int32(builtin->stride_height);
add_scalar_int32(builtin->depth_multiplier);
- check_and_add_activation(builtin->activation);
+ return check_and_add_activation(builtin->activation);
};
auto add_fully_connected_params = [&check_and_add_activation](void* data) {
auto builtin = reinterpret_cast<TfLiteFullyConnectedParams*>(data);
- check_and_add_activation(builtin->activation);
+ return check_and_add_activation(builtin->activation);
};
auto add_concatenation_params = [&add_scalar_int32](void* data) {
auto builtin = reinterpret_cast<TfLiteConcatenationParams*>(data);
add_scalar_int32(builtin->axis);
if (builtin->activation != kTfLiteActNone) {
- FATAL("Concatenation does not support fused activation in NNAPI");
+ logError("Concatenation does not support fused activation in NNAPI");
+ return kTfLiteError;
}
+ return kTfLiteOk;
};
auto add_softmax_params = [&add_scalar_float32](void* data) {
@@ -433,22 +447,22 @@ TfLiteStatus AddOpsAndParams(
switch (builtin) {
case tflite::BuiltinOperator_ADD:
nn_op_type = ANEURALNETWORKS_ADD;
- add_add_params(node.builtin_data);
+ RETURN_ERROR_IF_TFLITE_FAILED(add_add_params(node.builtin_data));
break;
case tflite::BuiltinOperator_MUL:
nn_op_type = ANEURALNETWORKS_MUL;
- add_add_params(node.builtin_data);
+ RETURN_ERROR_IF_TFLITE_FAILED(add_add_params(node.builtin_data));
break;
case tflite::BuiltinOperator_AVERAGE_POOL_2D:
- add_pooling_params(node.builtin_data);
+ RETURN_ERROR_IF_TFLITE_FAILED(add_pooling_params(node.builtin_data));
nn_op_type = ANEURALNETWORKS_AVERAGE_POOL_2D;
break;
case tflite::BuiltinOperator_MAX_POOL_2D:
- add_pooling_params(node.builtin_data);
+ RETURN_ERROR_IF_TFLITE_FAILED(add_pooling_params(node.builtin_data));
nn_op_type = ANEURALNETWORKS_MAX_POOL_2D;
break;
case tflite::BuiltinOperator_L2_POOL_2D:
- add_pooling_params(node.builtin_data);
+ RETURN_ERROR_IF_TFLITE_FAILED(add_pooling_params(node.builtin_data));
nn_op_type = ANEURALNETWORKS_L2_POOL_2D;
break;
case tflite::BuiltinOperator_CONV_2D: {
@@ -459,7 +473,8 @@ TfLiteStatus AddOpsAndParams(
return kTfLiteError;
}
}
- add_convolution_params(node.builtin_data);
+ RETURN_ERROR_IF_TFLITE_FAILED(
+ add_convolution_params(node.builtin_data));
nn_op_type = ANEURALNETWORKS_CONV_2D;
break;
case tflite::BuiltinOperator_RELU:
@@ -478,11 +493,13 @@ TfLiteStatus AddOpsAndParams(
nn_op_type = ANEURALNETWORKS_LOGISTIC;
break;
case tflite::BuiltinOperator_DEPTHWISE_CONV_2D:
- add_depthwise_conv_params(node.builtin_data);
+ RETURN_ERROR_IF_TFLITE_FAILED(
+ add_depthwise_conv_params(node.builtin_data));
nn_op_type = ANEURALNETWORKS_DEPTHWISE_CONV_2D;
break;
case tflite::BuiltinOperator_CONCATENATION:
- add_concatenation_params(node.builtin_data);
+ RETURN_ERROR_IF_TFLITE_FAILED(
+ add_concatenation_params(node.builtin_data));
nn_op_type = ANEURALNETWORKS_CONCATENATION;
break;
case tflite::BuiltinOperator_SOFTMAX:
@@ -490,7 +507,8 @@ TfLiteStatus AddOpsAndParams(
nn_op_type = ANEURALNETWORKS_SOFTMAX;
break;
case tflite::BuiltinOperator_FULLY_CONNECTED:
- add_fully_connected_params(node.builtin_data);
+ RETURN_ERROR_IF_TFLITE_FAILED(
+ add_fully_connected_params(node.builtin_data));
nn_op_type = ANEURALNETWORKS_FULLY_CONNECTED;
break;
case tflite::BuiltinOperator_RESHAPE:
@@ -544,14 +562,14 @@ TfLiteStatus AddOpsAndParams(
case tflite::BuiltinOperator_DIV:
nnapi_version = 11; // require NNAPI 1.1
nn_op_type = ANEURALNETWORKS_DIV;
- check_and_add_activation(
- reinterpret_cast<TfLiteDivParams*>(node.builtin_data)->activation);
+ RETURN_ERROR_IF_TFLITE_FAILED(check_and_add_activation(
+ reinterpret_cast<TfLiteDivParams*>(node.builtin_data)->activation));
break;
case tflite::BuiltinOperator_SUB:
nnapi_version = 11; // require NNAPI 1.1
nn_op_type = ANEURALNETWORKS_SUB;
- check_and_add_activation(
- reinterpret_cast<TfLiteSubParams*>(node.builtin_data)->activation);
+ RETURN_ERROR_IF_TFLITE_FAILED(check_and_add_activation(
+ reinterpret_cast<TfLiteSubParams*>(node.builtin_data)->activation));
break;
case tflite::BuiltinOperator_SQUEEZE:
nnapi_version = 11; // requires NNAPI 1.1
@@ -664,7 +682,8 @@ TfLiteStatus AddOpsAndParams(
}
if (nnapi_version == 11 && GetAndroidSdkVersionCached() < 28) {
- FATAL("Op %d needs NNAPI1.1", builtin);
+ logError("Op %d needs NNAPI1.1", builtin);
+ return kTfLiteError;
}
// Add the operation.
@@ -712,9 +731,9 @@ TfLiteStatus NNAPIDelegate::BuildGraph(Interpreter* interpreter) {
interpreter->outputs().size());
uint32_t next_id = 0;
- RETURN_ERROR_IF_NN_FAILED(addTensorOperands(
+ RETURN_ERROR_IF_TFLITE_FAILED(addTensorOperands(
interpreter, nn_model_, &next_id, &tensor_id_to_nnapi_id));
- RETURN_ERROR_IF_NN_FAILED(
+ RETURN_ERROR_IF_TFLITE_FAILED(
AddOpsAndParams(interpreter, nn_model_, next_id, &model_states_inputs_,
&model_states_outputs_, tensor_id_to_nnapi_id));
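The `nnapi_delegate.cc` changes above convert the parameter-adding lambdas from `FATAL` aborts to `TfLiteStatus` returns and wrap each invocation in the new `RETURN_ERROR_IF_TFLITE_FAILED` macro, so an unsupported model makes the delegate bail out gracefully instead of crashing the process. A self-contained sketch of the same pattern; the names and types below are illustrative, not the delegate's actual code.

```cpp
#include <cstdio>

// Minimal stand-ins for TfLiteStatus so the sketch compiles on its own.
enum StatusSketch { kOkSketch, kErrorSketch };

// Mirrors the shape of RETURN_ERROR_IF_TFLITE_FAILED: log and propagate the
// failure to the caller instead of aborting.
#define RETURN_IF_FAILED_SKETCH(x)                          \
  if ((x) != kOkSketch) {                                   \
    std::fprintf(stderr, "failure at line %d\n", __LINE__); \
    return kErrorSketch;                                    \
  }

StatusSketch AddActivationSketch(int activation, int max_supported) {
  if (activation > max_supported) {
    std::fprintf(stderr, "unsupported fused activation %d\n", activation);
    return kErrorSketch;  // report instead of FATAL-ing
  }
  // ... add the scalar operand here ...
  return kOkSketch;
}

StatusSketch AddOpSketch(int activation) {
  RETURN_IF_FAILED_SKETCH(AddActivationSketch(activation, /*max_supported=*/3));
  // ... add the operation itself ...
  return kOkSketch;
}
```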
diff --git a/tensorflow/contrib/lite/nnapi_delegate_disabled.cc b/tensorflow/contrib/lite/nnapi_delegate_disabled.cc
index efde72b1a7..e3536d3db6 100644
--- a/tensorflow/contrib/lite/nnapi_delegate_disabled.cc
+++ b/tensorflow/contrib/lite/nnapi_delegate_disabled.cc
@@ -27,7 +27,13 @@ NNAPIAllocation::NNAPIAllocation(const char* filename,
NNAPIAllocation::~NNAPIAllocation() {}
-NNAPIDelegate::~NNAPIDelegate() {}
+NNAPIDelegate::~NNAPIDelegate() {
+#define UNUSED_MEMBER(x) (void)(x)
+ UNUSED_MEMBER(nn_model_);
+ UNUSED_MEMBER(nn_compiled_model_);
+ UNUSED_MEMBER(model_status_);
+#undef UNUSED_MEMBER
+}
TfLiteStatus NNAPIDelegate::BuildGraph(Interpreter* interpreter) {
return kTfLiteError;
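The destructor change in `nnapi_delegate_disabled.cc` uses the `(void)member` idiom: referencing each member in a discarded expression has no runtime effect but marks the fields as used, silencing warnings such as clang's `-Wunused-private-field` in the stub build. A generic sketch of the idiom; the class and field below are illustrative.

```cpp
// Sketch of the (void)-cast idiom used in the destructor above.
class StubDelegateSketch {
 public:
  ~StubDelegateSketch() {
    (void)placeholder_;  // no-op read that suppresses unused-field warnings
  }

 private:
  int placeholder_ = 0;  // kept only so this sketch has an unused member
};
```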
diff --git a/tensorflow/contrib/lite/python/BUILD b/tensorflow/contrib/lite/python/BUILD
index 6e30251eff..57e1290e07 100644
--- a/tensorflow/contrib/lite/python/BUILD
+++ b/tensorflow/contrib/lite/python/BUILD
@@ -70,7 +70,7 @@ py_library(
py_test(
name = "lite_test",
srcs = ["lite_test.py"],
- data = ["@tflite_mobilenet_ssd_quant_protobuf//:tflite_graph.pbtxt"],
+ data = ["@tflite_mobilenet_ssd_quant_protobuf//:tflite_graph.pb"],
srcs_version = "PY2AND3",
tags = [
"no_oss",
diff --git a/tensorflow/contrib/lite/python/lite.py b/tensorflow/contrib/lite/python/lite.py
index 2de97fec86..44dfb97b84 100644
--- a/tensorflow/contrib/lite/python/lite.py
+++ b/tensorflow/contrib/lite/python/lite.py
@@ -58,6 +58,7 @@ from tensorflow.python.framework import graph_util as _tf_graph_util
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework.errors_impl import NotFoundError as _NotFoundError
from tensorflow.python.framework.importer import import_graph_def as _import_graph_def
+from tensorflow.python.lib.io import file_io as _file_io
from tensorflow.python.saved_model import signature_constants as _signature_constants
from tensorflow.python.saved_model import tag_constants as _tag_constants
@@ -225,8 +226,10 @@ class TocoConverter(object):
TocoConverter class.
Raises:
- ValueError:
+ IOError:
+ File not found.
Unable to parse input file.
+ ValueError:
The graph is not frozen.
input_arrays or output_arrays contains an invalid tensor name.
input_shapes is not correctly defined when required
@@ -234,10 +237,13 @@ class TocoConverter(object):
with _ops.Graph().as_default():
with _session.Session() as sess:
# Read GraphDef from file.
- graph_def = _graph_pb2.GraphDef()
- with open(graph_def_file, "rb") as f:
+ if not _file_io.file_exists(graph_def_file):
+ raise IOError("File '{0}' does not exist.".format(graph_def_file))
+ with _file_io.FileIO(graph_def_file, "rb") as f:
file_content = f.read()
+
try:
+ graph_def = _graph_pb2.GraphDef()
graph_def.ParseFromString(file_content)
except (_text_format.ParseError, DecodeError):
try:
@@ -248,9 +254,10 @@ class TocoConverter(object):
file_content = file_content.decode("utf-8")
else:
file_content = file_content.encode("utf-8")
+ graph_def = _graph_pb2.GraphDef()
_text_format.Merge(file_content, graph_def)
except (_text_format.ParseError, DecodeError):
- raise ValueError(
+ raise IOError(
"Unable to parse input file '{}'.".format(graph_def_file))
# Handles models with custom TFLite ops that cannot be resolved in
diff --git a/tensorflow/contrib/lite/python/lite_test.py b/tensorflow/contrib/lite/python/lite_test.py
index 1c94ba605a..3f8ea433ff 100644
--- a/tensorflow/contrib/lite/python/lite_test.py
+++ b/tensorflow/contrib/lite/python/lite_test.py
@@ -521,14 +521,21 @@ class FromFrozenGraphFile(test_util.TensorFlowTestCase):
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
- def testInvalidFile(self):
+ def testInvalidFileNotFound(self):
+ with self.assertRaises(IOError) as error:
+ lite.TocoConverter.from_frozen_graph('invalid_file', ['Placeholder'],
+ ['add'])
+ self.assertEqual('File \'invalid_file\' does not exist.',
+ str(error.exception))
+
+ def testInvalidFileBadData(self):
graph_def_file = os.path.join(self.get_temp_dir(), 'invalid_file')
with gfile.Open(graph_def_file, 'wb') as temp_file:
temp_file.write('bad data')
temp_file.flush()
# Attempts to convert the invalid model.
- with self.assertRaises(ValueError) as error:
+ with self.assertRaises(IOError) as error:
lite.TocoConverter.from_frozen_graph(graph_def_file, ['Placeholder'],
['add'])
self.assertEqual(
@@ -539,7 +546,7 @@ class FromFrozenGraphFile(test_util.TensorFlowTestCase):
def _initObjectDetectionArgs(self):
# Initializes the arguments required for the object detection model.
self._graph_def_file = resource_loader.get_path_to_datafile(
- 'testdata/tflite_graph.pbtxt')
+ 'testdata/tflite_graph.pb')
self._input_arrays = ['normalized_input_image_tensor']
self._output_arrays = [
'TFLite_Detection_PostProcess', 'TFLite_Detection_PostProcess:1',
@@ -586,7 +593,7 @@ class FromFrozenGraphFile(test_util.TensorFlowTestCase):
output_details[3]['name'])
self.assertTrue(([1] == output_details[3]['shape']).all())
- def testTFLiteGraphDefInvalid(self):
+ def testTFLiteGraphDefMissingShape(self):
# Tests invalid cases for the model that cannot be loaded in TensorFlow.
self._initObjectDetectionArgs()
@@ -597,6 +604,10 @@ class FromFrozenGraphFile(test_util.TensorFlowTestCase):
self.assertEqual('input_shapes must be defined for this model.',
str(error.exception))
+ def testTFLiteGraphDefInvalidShape(self):
+ # Tests invalid cases for the model that cannot be loaded in TensorFlow.
+ self._initObjectDetectionArgs()
+
# `input_shapes` does not contain the names in `input_arrays`.
with self.assertRaises(ValueError) as error:
lite.TocoConverter.from_frozen_graph(
diff --git a/tensorflow/contrib/lite/testing/BUILD b/tensorflow/contrib/lite/testing/BUILD
index 89912fd116..0b3a97d4f5 100644
--- a/tensorflow/contrib/lite/testing/BUILD
+++ b/tensorflow/contrib/lite/testing/BUILD
@@ -173,6 +173,7 @@ tf_cc_test(
srcs = ["tflite_driver_test.cc"],
data = ["//tensorflow/contrib/lite:testdata/multi_add.bin"],
tags = [
+ "no_oss", # b/112769036
"tflite_not_portable_android",
"tflite_not_portable_ios",
],
diff --git a/tensorflow/contrib/lite/toco/g3doc/toco_landscape.svg b/tensorflow/contrib/lite/toco/g3doc/toco_landscape.svg
index 262e13a591..335debde57 100644
--- a/tensorflow/contrib/lite/toco/g3doc/toco_landscape.svg
+++ b/tensorflow/contrib/lite/toco/g3doc/toco_landscape.svg
@@ -1 +1 @@
-<svg version="1.1" viewBox="0.0 0.0 720.0 540.0" fill="none" stroke="none" stroke-linecap="square" stroke-miterlimit="10" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns="http://www.w3.org/2000/svg"><clipPath id="p.0"><path d="m0 0l720.0 0l0 540.0l-720.0 0l0 -540.0z" clip-rule="nonzero"/></clipPath><g clip-path="url(#p.0)"><path fill="#000000" fill-opacity="0.0" d="m0 0l720.0 0l0 540.0l-720.0 0z" fill-rule="evenodd"/><path fill="#f3f3f3" d="m19.375328 28.750656l361.6378 0l0 358.01575l-361.6378 0z" fill-rule="evenodd"/><path stroke="#cccccc" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m19.375328 28.750656l361.6378 0l0 358.01575l-361.6378 0z" fill-rule="evenodd"/><path fill="#434343" d="m338.49512 374.66016q-0.609375 0 -1.171875 -0.140625q-0.546875 -0.15625 -0.96875 -0.421875q-0.25 -0.15625 -0.359375 -0.296875q-0.09375 -0.140625 -0.09375 -0.34375q0 -0.171875 0.09375 -0.28125q0.109375 -0.109375 0.265625 -0.109375q0.171875 0 0.46875 0.1875q0.40625 0.25 0.796875 0.390625q0.390625 0.140625 0.984375 0.140625q0.71875 0 1.109375 -0.25q0.40625 -0.265625 0.40625 -0.734375q0 -0.296875 -0.15625 -0.46875q-0.140625 -0.1875 -0.5 -0.328125q-0.359375 -0.140625 -1.046875 -0.296875q-1.171875 -0.25 -1.6875 -0.671875q-0.5 -0.421875 -0.5 -1.15625q0 -0.578125 0.3125 -1.015625q0.328125 -0.4375 0.890625 -0.6875q0.5625 -0.265625 1.28125 -0.265625q0.53125 0 1.015625 0.140625q0.484375 0.140625 0.859375 0.390625q0.453125 0.328125 0.453125 0.671875q0 0.171875 -0.109375 0.296875q-0.109375 0.125 -0.25 0.125q-0.15625 0 -0.484375 -0.234375q-0.375 -0.234375 -0.703125 -0.359375q-0.328125 -0.140625 -0.828125 -0.140625q-0.625 0 -1.015625 0.28125q-0.375 0.265625 -0.375 0.734375q0 0.296875 0.140625 0.484375q0.140625 0.171875 0.46875 0.3125q0.328125 0.140625 0.9375 0.28125q0.90625 0.1875 1.40625 0.4375q0.5 0.234375 0.703125 0.578125q0.21875 0.34375 0.21875 0.890625q0 0.828125 -0.703125 1.34375q-0.703125 0.515625 -1.859375 0.515625zm9.241241 -1.59375q0.140625 0 0.25 0.125q0.109375 0.109375 0.109375 0.296875q0 0.328125 -0.46875 0.609375q-0.484375 0.28125 -1.015625 0.421875q-0.53125 0.140625 -1.046875 0.140625q-1.5 0 -2.375 -0.890625q-0.875 -0.890625 -0.875 -2.46875q0 -1.0 0.390625 -1.765625q0.390625 -0.765625 1.078125 -1.1875q0.703125 -0.4375 1.59375 -0.4375q1.265625 0 2.015625 0.828125q0.75 0.828125 0.75 2.25q0 0.265625 -0.109375 0.390625q-0.109375 0.109375 -0.34375 0.109375l-4.296875 0q0.125 2.296875 2.171875 2.296875q0.53125 0 0.890625 -0.140625q0.375 -0.140625 0.8125 -0.390625q0.34375 -0.1875 0.46875 -0.1875zm-2.34375 -4.3125q-0.84375 0 -1.359375 0.53125q-0.515625 0.53125 -0.609375 1.515625l3.765625 0q-0.015625 -1.0 -0.484375 -1.515625q-0.46875 -0.53125 -1.3125 -0.53125zm7.5551147 -0.8125q0.546875 -0.03125 0.546875 0.453125q0 0.21875 -0.125 0.34375q-0.109375 0.125 -0.40625 0.15625l-0.390625 0.03125q-0.890625 0.078125 -1.328125 0.640625q-0.4375 0.546875 -0.4375 1.296875l0 3.234375q0 0.265625 -0.15625 0.40625q-0.140625 0.125 -0.375 0.125q-0.234375 0 -0.390625 -0.140625q-0.15625 -0.140625 -0.15625 -0.390625l0 -5.625q0 -0.25 0.15625 -0.390625q0.15625 -0.140625 0.390625 -0.140625q0.21875 0 0.359375 0.140625q0.140625 0.140625 0.140625 0.375l0 0.75q0.28125 -0.578125 0.796875 -0.890625q0.515625 -0.3125 1.1875 -0.359375l0.1875 -0.015625zm6.157959 0.328125q0.15625 -0.3125 0.46875 -0.3125q0.203125 0 0.359375 0.140625q0.15625 0.125 0.15625 0.328125q0 0.109375 -0.046875 0.203125l-2.59375 5.609375q-0.078125 0.171875 -0.25 0.28125q-0.15625 0.09375 -0.34375 0.09375q-0.171875 0 -0.328125 -0.09375q-0.15625 -0.109375 
-0.25 -0.28125l-2.59375 -5.609375q-0.046875 -0.09375 -0.046875 -0.1875q0 -0.203125 0.171875 -0.34375q0.1875 -0.15625 0.390625 -0.15625q0.140625 0 0.265625 0.078125q0.125 0.078125 0.1875 0.234375l2.234375 5.0l2.21875 -4.984375zm7.2099915 4.796875q0.140625 0 0.25 0.125q0.109375 0.109375 0.109375 0.296875q0 0.328125 -0.46875 0.609375q-0.484375 0.28125 -1.015625 0.421875q-0.53125 0.140625 -1.046875 0.140625q-1.5 0 -2.375 -0.890625q-0.875 -0.890625 -0.875 -2.46875q0 -1.0 0.390625 -1.765625q0.390625 -0.765625 1.078125 -1.1875q0.703125 -0.4375 1.59375 -0.4375q1.265625 0 2.015625 0.828125q0.75 0.828125 0.75 2.25q0 0.265625 -0.109375 0.390625q-0.109375 0.109375 -0.34375 0.109375l-4.296875 0q0.125 2.296875 2.171875 2.296875q0.53125 0 0.890625 -0.140625q0.375 -0.140625 0.8125 -0.390625q0.34375 -0.1875 0.46875 -0.1875zm-2.34375 -4.3125q-0.84375 0 -1.359375 0.53125q-0.515625 0.53125 -0.609375 1.515625l3.765625 0q-0.015625 -1.0 -0.484375 -1.515625q-0.46875 -0.53125 -1.3125 -0.53125zm7.5551453 -0.8125q0.546875 -0.03125 0.546875 0.453125q0 0.21875 -0.125 0.34375q-0.109375 0.125 -0.40625 0.15625l-0.390625 0.03125q-0.890625 0.078125 -1.328125 0.640625q-0.4375 0.546875 -0.4375 1.296875l0 3.234375q0 0.265625 -0.15625 0.40625q-0.140625 0.125 -0.375 0.125q-0.234375 0 -0.390625 -0.140625q-0.15625 -0.140625 -0.15625 -0.390625l0 -5.625q0 -0.25 0.15625 -0.390625q0.15625 -0.140625 0.390625 -0.140625q0.21875 0 0.359375 0.140625q0.140625 0.140625 0.140625 0.375l0 0.75q0.28125 -0.578125 0.796875 -0.890625q0.515625 -0.3125 1.1875 -0.359375l0.1875 -0.015625z" fill-rule="nonzero"/><path fill="#d9d9d9" d="m25.624672 36.249344l301.88977 0l0 69.98425l-301.88977 0z" fill-rule="evenodd"/><path stroke="#cccccc" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" stroke-dasharray="4.0,3.0" d="m25.624672 36.249344l301.88977 0l0 69.98425l-301.88977 0z" fill-rule="evenodd"/><path fill="#434343" d="m134.36497 56.831844q-0.234375 0 -0.375 -0.140625q-0.140625 -0.140625 -0.140625 -0.359375l0 -7.1875l-2.578125 0q-0.21875 0 -0.34375 -0.109375q-0.109375 -0.109375 -0.109375 -0.3125q0 -0.203125 0.109375 -0.296875q0.125 -0.109375 0.34375 -0.109375l6.15625 0q0.21875 0 0.328125 0.109375q0.125 0.09375 0.125 0.296875q0 0.203125 -0.125 0.3125q-0.109375 0.109375 -0.328125 0.109375l-2.578125 0l0 7.1875q0 0.21875 -0.140625 0.359375q-0.125 0.140625 -0.34375 0.140625zm9.004181 -1.421875q0.140625 0 0.234375 0.109375q0.09375 0.109375 0.09375 0.28125q0 0.296875 -0.421875 0.546875q-0.4375 0.25 -0.921875 0.375q-0.46875 0.125 -0.921875 0.125q-1.359375 0 -2.15625 -0.796875q-0.78125 -0.8125 -0.78125 -2.21875q0 -0.90625 0.34375 -1.59375q0.359375 -0.6875 0.984375 -1.0625q0.640625 -0.390625 1.4375 -0.390625q1.140625 0 1.8125 0.75q0.671875 0.734375 0.671875 2.0q0 0.25 -0.09375 0.359375q-0.09375 0.109375 -0.3125 0.109375l-3.859375 0q0.09375 2.0625 1.953125 2.0625q0.46875 0 0.796875 -0.125q0.34375 -0.125 0.71875 -0.34375q0.3125 -0.1875 0.421875 -0.1875zm-2.09375 -3.875q-0.765625 0 -1.234375 0.484375q-0.46875 0.484375 -0.546875 1.359375l3.390625 0q-0.015625 -0.890625 -0.4375 -1.359375q-0.421875 -0.484375 -1.171875 -0.484375zm6.839676 -0.75q2.09375 0 2.09375 2.3125l0 3.25q0 0.234375 -0.125 0.359375q-0.125 0.125 -0.359375 0.125q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -3.1875q0 -0.8125 -0.328125 -1.1875q-0.3125 -0.375 -1.0 -0.375q-0.8125 0 -1.296875 0.5q-0.46875 0.484375 -0.46875 1.328125l0 2.921875q0 0.234375 -0.125 0.359375q-0.125 0.125 -0.359375 0.125q-0.234375 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -5.0625q0 -0.21875 
0.125 -0.34375q0.125 -0.140625 0.359375 -0.140625q0.21875 0 0.34375 0.140625q0.125 0.125 0.125 0.328125l0 0.609375q0.28125 -0.53125 0.796875 -0.8125q0.53125 -0.28125 1.1875 -0.28125zm5.84729 6.0625q-0.56248474 0 -1.0624847 -0.125q-0.5 -0.140625 -0.875 -0.375q-0.21875 -0.140625 -0.3125 -0.265625q-0.078125 -0.125 -0.078125 -0.3125q0 -0.15625 0.078125 -0.25q0.09375 -0.109375 0.234375 -0.109375q0.15625 0 0.421875 0.1875q0.359375 0.21875 0.71875 0.34375q0.359375 0.125 0.87498474 0.125q0.65625 0 1.015625 -0.21875q0.359375 -0.234375 0.359375 -0.671875q0 -0.265625 -0.140625 -0.421875q-0.125 -0.171875 -0.453125 -0.296875q-0.3125 -0.125 -0.9375 -0.25q-1.0624847 -0.234375 -1.5156097 -0.609375q-0.453125 -0.390625 -0.453125 -1.046875q0 -0.515625 0.28125 -0.90625q0.28125 -0.40625 0.796875 -0.625q0.515625 -0.234375 1.1562347 -0.234375q0.46875 0 0.90625 0.125q0.4375 0.125 0.78125 0.34375q0.40625 0.296875 0.40625 0.609375q0 0.15625 -0.09375 0.265625q-0.09375 0.109375 -0.234375 0.109375q-0.140625 0 -0.4375 -0.203125q-0.328125 -0.21875 -0.625 -0.34375q-0.296875 -0.125 -0.75 -0.125q-0.56248474 0 -0.90623474 0.265625q-0.34375 0.25 -0.34375 0.671875q0 0.25 0.125 0.421875q0.125 0.15625 0.421875 0.28125q0.296875 0.125 0.84373474 0.25q0.828125 0.1875 1.265625 0.40625q0.453125 0.203125 0.640625 0.515625q0.203125 0.3125 0.203125 0.796875q0 0.75 -0.640625 1.21875q-0.640625 0.453125 -1.671875 0.453125zm6.2131653 0q-0.828125 0 -1.46875 -0.359375q-0.625 -0.375 -0.96875 -1.0625q-0.34375 -0.703125 -0.34375 -1.609375q0 -0.90625 0.34375 -1.59375q0.34375 -0.703125 0.96875 -1.0625q0.640625 -0.375 1.46875 -0.375q0.828125 0 1.453125 0.375q0.640625 0.359375 0.984375 1.0625q0.34375 0.6875 0.34375 1.59375q0 0.90625 -0.34375 1.609375q-0.34375 0.6875 -0.984375 1.0625q-0.625 0.359375 -1.453125 0.359375zm0 -0.796875q0.859375 0 1.3125 -0.5625q0.46875 -0.578125 0.46875 -1.671875q0 -1.0625 -0.46875 -1.640625q-0.46875 -0.59375 -1.3125 -0.59375q-0.859375 0 -1.328125 0.59375q-0.46875 0.578125 -0.46875 1.640625q0 1.078125 0.453125 1.65625q0.46875 0.578125 1.34375 0.578125zm7.1288147 -5.25q0.5 -0.03125 0.5 0.40625q0 0.203125 -0.109375 0.3125q-0.109375 0.109375 -0.375 0.140625l-0.359375 0.03125q-0.796875 0.078125 -1.1875 0.578125q-0.390625 0.484375 -0.390625 1.15625l0 2.921875q0 0.234375 -0.140625 0.359375q-0.125 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -5.0625q0 -0.234375 0.140625 -0.359375q0.140625 -0.125 0.34375 -0.125q0.1875 0 0.3125 0.125q0.140625 0.125 0.140625 0.34375l0 0.671875q0.25 -0.53125 0.71875 -0.796875q0.46875 -0.28125 1.0625 -0.328125l0.171875 -0.015625zm1.970398 6.03125q-0.21875 0 -0.359375 -0.140625q-0.125 -0.140625 -0.125 -0.359375l0 -7.546875q0 -0.21875 0.125 -0.34375q0.140625 -0.125 0.375 -0.125l4.375 0q0.203125 0 0.328125 0.109375q0.125 0.09375 0.125 0.296875q0 0.203125 -0.125 0.3125q-0.125 0.109375 -0.328125 0.109375l-3.90625 0l0 2.90625l3.65625 0q0.21875 0 0.328125 0.109375q0.125 0.109375 0.125 0.3125q0 0.1875 -0.125 0.296875q-0.109375 0.109375 -0.328125 0.109375l-3.65625 0l0 3.453125q0 0.21875 -0.125 0.359375q-0.125 0.140625 -0.359375 0.140625zm6.5434265 0q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -7.625q0 -0.21875 0.125 -0.34375q0.140625 -0.125 0.359375 -0.125q0.203125 0 0.34375 0.125q0.140625 0.125 0.140625 0.34375l0 7.625q0 0.234375 -0.140625 0.359375q-0.140625 0.125 -0.34375 0.125zm4.721527 0.015625q-0.828125 0 -1.46875 -0.359375q-0.625 -0.375 -0.96875 -1.0625q-0.34375 -0.703125 -0.34375 -1.609375q0 -0.90625 0.34375 -1.59375q0.34375 -0.703125 0.96875 
-1.0625q0.640625 -0.375 1.46875 -0.375q0.828125 0 1.453125 0.375q0.640625 0.359375 0.984375 1.0625q0.34375 0.6875 0.34375 1.59375q0 0.90625 -0.34375 1.609375q-0.34375 0.6875 -0.984375 1.0625q-0.625 0.359375 -1.453125 0.359375zm0 -0.796875q0.859375 0 1.3125 -0.5625q0.46875 -0.578125 0.46875 -1.671875q0 -1.0625 -0.46875 -1.640625q-0.46875 -0.59375 -1.3125 -0.59375q-0.859375 0 -1.328125 0.59375q-0.46875 0.578125 -0.46875 1.640625q0 1.078125 0.453125 1.65625q0.46875 0.578125 1.34375 0.578125zm12.222534 -4.9375q0.125 -0.28125 0.390625 -0.28125q0.1875 0 0.328125 0.125q0.140625 0.109375 0.140625 0.296875q0 0.078125 -0.03125 0.171875l-1.984375 5.046875q-0.078125 0.15625 -0.21875 0.25q-0.140625 0.078125 -0.296875 0.078125q-0.15625 0 -0.296875 -0.078125q-0.140625 -0.09375 -0.21875 -0.25l-1.65625 -4.21875l-1.640625 4.21875q-0.0625 0.15625 -0.203125 0.25q-0.140625 0.078125 -0.3125 0.078125q-0.15625 0 -0.296875 -0.078125q-0.140625 -0.09375 -0.21875 -0.25l-1.984375 -5.03125q-0.046875 -0.09375 -0.046875 -0.171875q0 -0.1875 0.15625 -0.3125q0.171875 -0.140625 0.359375 -0.140625q0.296875 0 0.40625 0.296875l1.65625 4.421875l1.6875 -4.390625q0.078125 -0.15625 0.203125 -0.234375q0.125 -0.09375 0.265625 -0.09375q0.15625 0 0.28125 0.09375q0.125 0.078125 0.1875 0.234375l1.6875 4.375l1.65625 -4.40625zm12.637604 5.09375q0.046875 0.09375 0.046875 0.203125q0 0.171875 -0.140625 0.296875q-0.140625 0.125 -0.328125 0.125q-0.296875 0 -0.421875 -0.296875l-0.84375 -1.9375l-4.53125 0l-0.859375 1.9375q-0.125 0.296875 -0.421875 0.296875q-0.1875 0 -0.34375 -0.125q-0.140625 -0.125 -0.140625 -0.3125q0 -0.09375 0.046875 -0.1875l3.4375 -7.640625q0.078125 -0.15625 0.21875 -0.234375q0.140625 -0.09375 0.3125 -0.09375q0.171875 0 0.3125 0.09375q0.15625 0.078125 0.21875 0.234375l3.4375 7.640625zm-5.859375 -2.421875l3.8125 0l-1.90625 -4.3125l-1.90625 4.3125zm7.78656 3.046875q-0.21875 0 -0.359375 -0.140625q-0.125 -0.140625 -0.125 -0.359375l0 -7.546875q0 -0.21875 0.125 -0.34375q0.140625 -0.125 0.375 -0.125l2.84375 0q1.328125 0 2.0625 0.65625q0.75 0.640625 0.75 1.828125q0 1.1875 -0.75 1.84375q-0.734375 0.65625 -2.0625 0.65625l-2.359375 0l0 3.03125q0 0.21875 -0.140625 0.359375q-0.125 0.140625 -0.359375 0.140625zm2.765625 -4.34375q1.9375 0 1.9375 -1.6875q0 -1.671875 -1.9375 -1.671875l-2.265625 0l0 3.359375l2.265625 0zm4.9744263 4.34375q-0.21875 0 -0.359375 -0.140625q-0.125 -0.140625 -0.125 -0.359375l0 -7.578125q0 -0.234375 0.125 -0.359375q0.140625 -0.140625 0.359375 -0.140625q0.234375 0 0.359375 0.140625q0.140625 0.125 0.140625 0.359375l0 7.578125q0 0.21875 -0.140625 0.359375q-0.125 0.140625 -0.359375 0.140625zm4.4157715 0.015625q-0.5625 0 -1.0625 -0.125q-0.5 -0.140625 -0.875 -0.375q-0.21875 -0.140625 -0.3125 -0.265625q-0.078125 -0.125 -0.078125 -0.3125q0 -0.15625 0.078125 -0.25q0.09375 -0.109375 0.234375 -0.109375q0.15625 0 0.421875 0.1875q0.359375 0.21875 0.71875 0.34375q0.359375 0.125 0.875 0.125q0.65625 0 1.015625 -0.21875q0.359375 -0.234375 0.359375 -0.671875q0 -0.265625 -0.140625 -0.421875q-0.125 -0.171875 -0.453125 -0.296875q-0.3125 -0.125 -0.9375 -0.25q-1.0625 -0.234375 -1.515625 -0.609375q-0.453125 -0.390625 -0.453125 -1.046875q0 -0.515625 0.28125 -0.90625q0.28125 -0.40625 0.796875 -0.625q0.515625 -0.234375 1.15625 -0.234375q0.46875 0 0.90625 0.125q0.4375 0.125 0.78125 0.34375q0.40625 0.296875 0.40625 0.609375q0 0.15625 -0.09375 0.265625q-0.09375 0.109375 -0.234375 0.109375q-0.140625 0 -0.4375 -0.203125q-0.328125 -0.21875 -0.625 -0.34375q-0.296875 -0.125 -0.75 -0.125q-0.5625 0 -0.90625 0.265625q-0.34375 0.25 -0.34375 0.671875q0 0.25 
0.125 0.421875q0.125 0.15625 0.421875 0.28125q0.296875 0.125 0.84375 0.25q0.828125 0.1875 1.265625 0.40625q0.453125 0.203125 0.640625 0.515625q0.203125 0.3125 0.203125 0.796875q0 0.75 -0.640625 1.21875q-0.640625 0.453125 -1.671875 0.453125z" fill-rule="nonzero"/><path fill="#f3f3f3" d="m396.75067 183.75066l249.00787 0l0 203.02364l-249.00787 0z" fill-rule="evenodd"/><path stroke="#cccccc" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m396.75067 183.75066l249.00787 0l0 203.02364l-249.00787 0z" fill-rule="evenodd"/><path fill="#434343" d="m409.42255 374.66803q-0.90625 0 -1.609375 -0.40625q-0.6875 -0.421875 -1.078125 -1.171875q-0.375 -0.765625 -0.375 -1.765625q0 -1.0 0.390625 -1.765625q0.40625 -0.78125 1.109375 -1.203125q0.703125 -0.4375 1.625 -0.4375q0.5 0 1.0 0.140625q0.5 0.140625 0.875 0.40625q0.234375 0.171875 0.328125 0.328125q0.109375 0.140625 0.109375 0.328125q0 0.1875 -0.109375 0.3125q-0.09375 0.109375 -0.25 0.109375q-0.09375 0 -0.203125 -0.046875q-0.09375 -0.046875 -0.171875 -0.09375q-0.078125 -0.0625 -0.09375 -0.078125q-0.359375 -0.234375 -0.671875 -0.359375q-0.3125 -0.140625 -0.765625 -0.140625q-0.96875 0 -1.515625 0.671875q-0.53125 0.65625 -0.53125 1.828125q0 1.171875 0.53125 1.8125q0.546875 0.640625 1.515625 0.640625q0.453125 0 0.78125 -0.125q0.328125 -0.140625 0.65625 -0.375q0.15625 -0.09375 0.28125 -0.15625q0.140625 -0.0625 0.234375 -0.0625q0.140625 0 0.234375 0.125q0.109375 0.109375 0.109375 0.296875q0 0.171875 -0.09375 0.3125q-0.09375 0.140625 -0.34375 0.3125q-0.375 0.25 -0.90625 0.40625q-0.515625 0.15625 -1.0625 0.15625zm4.2591553 -0.03125q-0.234375 0 -0.390625 -0.140625q-0.15625 -0.140625 -0.15625 -0.390625l0 -8.46875q0 -0.25 0.15625 -0.390625q0.15625 -0.140625 0.390625 -0.140625q0.21875 0 0.375 0.140625q0.15625 0.140625 0.15625 0.390625l0 8.46875q0 0.25 -0.15625 0.390625q-0.15625 0.140625 -0.375 0.140625zm3.092102 0q-0.234375 0 -0.390625 -0.140625q-0.15625 -0.140625 -0.15625 -0.390625l0 -5.625q0 -0.25 0.15625 -0.390625q0.15625 -0.140625 0.390625 -0.140625q0.234375 0 0.375 0.140625q0.15625 0.140625 0.15625 0.390625l0 5.625q0 0.265625 -0.15625 0.40625q-0.140625 0.125 -0.375 0.125zm0 -8.09375q-0.3125 0 -0.515625 -0.171875q-0.203125 -0.1875 -0.203125 -0.5q0 -0.296875 0.203125 -0.484375q0.203125 -0.1875 0.515625 -0.1875q0.328125 0 0.515625 0.1875q0.203125 0.1875 0.203125 0.484375q0 0.3125 -0.203125 0.5q-0.1875 0.171875 -0.515625 0.171875zm7.5765076 6.53125q0.140625 0 0.25 0.125q0.109375 0.109375 0.109375 0.296875q0 0.328125 -0.46875 0.609375q-0.484375 0.28125 -1.015625 0.421875q-0.53125 0.140625 -1.046875 0.140625q-1.5 0 -2.375 -0.890625q-0.875 -0.890625 -0.875 -2.46875q0 -1.0 0.390625 -1.765625q0.390625 -0.765625 1.078125 -1.1875q0.703125 -0.4375 1.59375 -0.4375q1.265625 0 2.015625 0.828125q0.75 0.828125 0.75 2.25q0 0.265625 -0.109375 0.390625q-0.109375 0.109375 -0.34375 0.109375l-4.296875 0q0.125 2.296875 2.171875 2.296875q0.53125 0 0.890625 -0.140625q0.375 -0.140625 0.8125 -0.390625q0.34375 -0.1875 0.46875 -0.1875zm-2.34375 -4.3125q-0.84375 0 -1.359375 0.53125q-0.515625 0.53125 -0.609375 1.515625l3.765625 0q-0.015625 -1.0 -0.484375 -1.515625q-0.46875 -0.53125 -1.3125 -0.53125zm7.6020203 -0.84375q2.328125 0 2.328125 2.578125l0 3.609375q0 0.25 -0.140625 0.390625q-0.140625 0.140625 -0.390625 0.140625q-0.25 0 -0.40625 -0.140625q-0.140625 -0.140625 -0.140625 -0.390625l0 -3.546875q0 -0.90625 -0.359375 -1.3125q-0.34375 -0.421875 -1.125 -0.421875q-0.890625 0 -1.421875 0.546875q-0.53125 0.546875 -0.53125 1.484375l0 3.25q0 0.25 -0.140625 0.390625q-0.140625 
0.140625 -0.390625 0.140625q-0.25 0 -0.40625 -0.140625q-0.140625 -0.140625 -0.140625 -0.390625l0 -5.625q0 -0.234375 0.140625 -0.375q0.15625 -0.15625 0.40625 -0.15625q0.234375 0 0.375 0.15625q0.140625 0.140625 0.140625 0.359375l0 0.6875q0.328125 -0.609375 0.890625 -0.921875q0.578125 -0.3125 1.3125 -0.3125zm7.304718 5.875q0.46875 0.03125 0.46875 0.421875q0 0.21875 -0.171875 0.34375q-0.171875 0.109375 -0.5 0.078125l-0.359375 -0.015625q-1.0625 -0.09375 -1.578125 -0.640625q-0.5 -0.5625 -0.5 -1.703125l0 -3.34375l-0.890625 0q-0.234375 0 -0.359375 -0.109375q-0.125 -0.109375 -0.125 -0.296875q0 -0.203125 0.125 -0.3125q0.125 -0.125 0.359375 -0.125l0.890625 0l0 -1.515625q0 -0.25 0.140625 -0.390625q0.15625 -0.140625 0.40625 -0.140625q0.234375 0 0.375 0.140625q0.15625 0.140625 0.15625 0.390625l0 1.515625l1.484375 0q0.203125 0 0.328125 0.125q0.140625 0.109375 0.140625 0.3125q0 0.1875 -0.140625 0.296875q-0.125 0.109375 -0.328125 0.109375l-1.484375 0l0 3.40625q0 0.734375 0.296875 1.0625q0.296875 0.3125 0.90625 0.359375l0.359375 0.03125z" fill-rule="nonzero"/><path fill="#f4cccc" d="m206.61942 201.17455l140.47244 0l0 30.992126l-140.47244 0z" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m206.61942 201.17455l140.47244 0l0 30.992126l-140.47244 0z" fill-rule="evenodd"/><path fill="#000000" d="m237.0857 213.5031q-0.640625 0.046875 -0.96875 0.40625q-0.3125 0.34375 -0.3125 1.046875l0 0.390625l1.328125 0q0.203125 0 0.3125 0.109375q0.109375 0.109375 0.109375 0.28125q0 0.1875 -0.109375 0.28125q-0.109375 0.09375 -0.3125 0.09375l-1.328125 0l0 4.65625q0 0.234375 -0.140625 0.359375q-0.140625 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.140625 -0.125 -0.140625 -0.359375l0 -4.65625l-0.796875 0q-0.203125 0 -0.328125 -0.09375q-0.109375 -0.109375 -0.109375 -0.28125q0 -0.171875 0.109375 -0.28125q0.125 -0.109375 0.328125 -0.109375l0.796875 0l0 -0.21875q0 -1.078125 0.53125 -1.6875q0.546875 -0.625 1.5625 -0.703125l0.3125 -0.015625q0.3125 -0.03125 0.453125 0.0625q0.140625 0.078125 0.140625 0.296875q0 0.34375 -0.421875 0.390625l-0.3125 0.03125zm4.248535 1.71875q0.5 -0.03125 0.5 0.40625q0 0.203125 -0.109375 0.3125q-0.109375 0.109375 -0.375 0.140625l-0.359375 0.03125q-0.796875 0.078125 -1.1875 0.578125q-0.390625 0.484375 -0.390625 1.15625l0 2.921875q0 0.234375 -0.140625 0.359375q-0.125 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -5.0625q0 -0.234375 0.140625 -0.359375q0.140625 -0.125 0.34375 -0.125q0.1875 0 0.3125 0.125q0.140625 0.125 0.140625 0.34375l0 0.671875q0.25 -0.53125 0.71875 -0.796875q0.46875 -0.28125 1.0625 -0.328125l0.171875 -0.015625zm5.861023 4.609375q0.140625 0 0.234375 0.109375q0.09375 0.109375 0.09375 0.28125q0 0.296875 -0.421875 0.546875q-0.4375 0.25 -0.921875 0.375q-0.46875 0.125 -0.921875 0.125q-1.359375 0 -2.15625 -0.796875q-0.78125 -0.8125 -0.78125 -2.21875q0 -0.90625 0.34375 -1.59375q0.359375 -0.6875 0.984375 -1.0625q0.640625 -0.390625 1.4375 -0.390625q1.140625 0 1.8125 0.75q0.671875 0.734375 0.671875 2.0q0 0.25 -0.09375 0.359375q-0.09375 0.109375 -0.3125 0.109375l-3.859375 0q0.09375 2.0625 1.953125 2.0625q0.46875 0 0.796875 -0.125q0.34375 -0.125 0.71875 -0.34375q0.3125 -0.1875 0.421875 -0.1875zm-2.09375 -3.875q-0.765625 0 -1.234375 0.484375q-0.46875 0.484375 -0.546875 1.359375l3.390625 0q-0.015625 -0.890625 -0.4375 -1.359375q-0.421875 -0.484375 -1.171875 -0.484375zm8.417801 3.875q0.140625 0 0.234375 0.109375q0.09375 0.109375 0.09375 0.28125q0 0.296875 -0.421875 0.546875q-0.4375 0.25 -0.921875 
0.375q-0.46875 0.125 -0.921875 0.125q-1.359375 0 -2.15625 -0.796875q-0.78125 -0.8125 -0.78125 -2.21875q0 -0.90625 0.34375 -1.59375q0.359375 -0.6875 0.984375 -1.0625q0.640625 -0.390625 1.4375 -0.390625q1.140625 0 1.8125 0.75q0.671875 0.734375 0.671875 2.0q0 0.25 -0.09375 0.359375q-0.09375 0.109375 -0.3125 0.109375l-3.859375 0q0.09375 2.0625 1.953125 2.0625q0.46875 0 0.796875 -0.125q0.34375 -0.125 0.71875 -0.34375q0.3125 -0.1875 0.421875 -0.1875zm-2.09375 -3.875q-0.765625 0 -1.234375 0.484375q-0.46875 0.484375 -0.546875 1.359375l3.390625 0q-0.015625 -0.890625 -0.4375 -1.359375q-0.421875 -0.484375 -1.171875 -0.484375zm8.199051 4.46875q0.203125 0 0.296875 0.109375q0.109375 0.09375 0.109375 0.265625q0 0.1875 -0.109375 0.296875q-0.09375 0.09375 -0.296875 0.09375l-4.203125 0q-0.203125 0 -0.34375 -0.125q-0.125 -0.125 -0.125 -0.3125q0 -0.1875 0.140625 -0.359375l3.546875 -4.28125l-3.28125 0q-0.203125 0 -0.328125 -0.09375q-0.109375 -0.109375 -0.109375 -0.28125q0 -0.171875 0.109375 -0.28125q0.125 -0.109375 0.328125 -0.109375l4.0625 0q0.21875 0 0.34375 0.125q0.140625 0.125 0.140625 0.3125q0 0.1875 -0.140625 0.359375l-3.5625 4.28125l3.421875 0zm6.2547913 -0.59375q0.140625 0 0.234375 0.109375q0.09375 0.109375 0.09375 0.28125q0 0.296875 -0.421875 0.546875q-0.4375 0.25 -0.921875 0.375q-0.46875 0.125 -0.921875 0.125q-1.359375 0 -2.15625 -0.796875q-0.78125 -0.8125 -0.78125 -2.21875q0 -0.90625 0.34375 -1.59375q0.359375 -0.6875 0.984375 -1.0625q0.640625 -0.390625 1.4375 -0.390625q1.140625 0 1.8125 0.75q0.671875 0.734375 0.671875 2.0q0 0.25 -0.09375 0.359375q-0.09375 0.109375 -0.3125 0.109375l-3.859375 0q0.09375 2.0625 1.953125 2.0625q0.46875 0 0.796875 -0.125q0.34375 -0.125 0.71875 -0.34375q0.3125 -0.1875 0.421875 -0.1875zm-2.09375 -3.875q-0.765625 0 -1.234375 0.484375q-0.46875 0.484375 -0.546875 1.359375l3.390625 0q-0.015625 -0.890625 -0.4375 -1.359375q-0.421875 -0.484375 -1.171875 -0.484375zm3.3865662 5.875q-0.171875 0 -0.28125 -0.09375q-0.109375 -0.09375 -0.109375 -0.21875q0 -0.140625 0.109375 -0.234375q0.109375 -0.09375 0.28125 -0.09375l5.21875 0q0.171875 0 0.28125 0.09375q0.109375 0.09375 0.109375 0.234375q0 0.125 -0.109375 0.21875q-0.109375 0.09375 -0.28125 0.09375l-5.21875 0zm11.2500305 -6.609375q0.234375 0 0.359375 0.140625q0.125 0.125 0.125 0.34375l0 5.09375q0 1.296875 -0.671875 1.96875q-0.671875 0.671875 -1.984375 0.671875q-1.28125 0 -2.140625 -0.515625q-0.421875 -0.234375 -0.421875 -0.546875q0 -0.171875 0.078125 -0.28125q0.09375 -0.109375 0.234375 -0.109375q0.125 0 0.4375 0.171875q0.421875 0.21875 0.828125 0.34375q0.40625 0.140625 0.96875 0.140625q0.859375 0 1.28125 -0.453125q0.4375 -0.453125 0.4375 -1.3125l0 -1.03125q-0.25 0.5625 -0.78125 0.859375q-0.515625 0.296875 -1.21875 0.296875q-0.765625 0 -1.359375 -0.359375q-0.59375 -0.359375 -0.9375 -1.015625q-0.328125 -0.65625 -0.328125 -1.515625q0 -0.875 0.328125 -1.53125q0.34375 -0.65625 0.9375 -1.015625q0.59375 -0.359375 1.359375 -0.359375q0.6875 0 1.203125 0.296875q0.515625 0.296875 0.78125 0.84375l0 -0.640625q0 -0.21875 0.125 -0.34375q0.125 -0.140625 0.359375 -0.140625zm-2.28125 4.984375q0.84375 0 1.3125 -0.546875q0.484375 -0.5625 0.484375 -1.546875q0 -0.984375 -0.46875 -1.53125q-0.46875 -0.5625 -1.328125 -0.5625q-0.84375 0 -1.34375 0.5625q-0.484375 0.546875 -0.484375 1.53125q0 0.984375 0.484375 1.546875q0.5 0.546875 1.34375 0.546875zm7.4695435 -4.984375q0.5 -0.03125 0.5 0.40625q0 0.203125 -0.109375 0.3125q-0.109375 0.109375 -0.375 0.140625l-0.359375 0.03125q-0.796875 0.078125 -1.1875 0.578125q-0.390625 0.484375 -0.390625 1.15625l0 2.921875q0 
0.234375 -0.140625 0.359375q-0.125 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -5.0625q0 -0.234375 0.140625 -0.359375q0.140625 -0.125 0.34375 -0.125q0.1875 0 0.3125 0.125q0.140625 0.125 0.140625 0.34375l0 0.671875q0.25 -0.53125 0.71875 -0.796875q0.46875 -0.28125 1.0625 -0.328125l0.171875 -0.015625zm3.720398 -0.015625q2.203125 0 2.203125 2.296875l0 3.265625q0 0.21875 -0.125 0.359375q-0.125 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.125 -0.140625 -0.125 -0.359375l0 -0.578125q-0.21875 0.515625 -0.6875 0.796875q-0.46875 0.28125 -1.078125 0.28125q-0.5625 0 -1.046875 -0.21875q-0.46875 -0.234375 -0.75 -0.640625q-0.265625 -0.40625 -0.265625 -0.90625q0 -0.65625 0.328125 -1.015625q0.34375 -0.375 1.109375 -0.53125q0.765625 -0.15625 2.125 -0.15625l0.265625 0l0 -0.40625q0 -0.71875 -0.296875 -1.046875q-0.28125 -0.34375 -0.953125 -0.34375q-0.8125 0 -1.65625 0.453125q-0.3125 0.203125 -0.453125 0.203125q-0.140625 0 -0.234375 -0.109375q-0.09375 -0.109375 -0.09375 -0.28125q0 -0.171875 0.09375 -0.296875q0.109375 -0.125 0.328125 -0.25q0.421875 -0.25 0.953125 -0.375q0.546875 -0.140625 1.0625 -0.140625zm-0.390625 5.296875q0.71875 0 1.171875 -0.484375q0.46875 -0.484375 0.46875 -1.25l0 -0.34375l-0.21875 0q-1.046875 0 -1.609375 0.09375q-0.546875 0.078125 -0.78125 0.296875q-0.234375 0.203125 -0.234375 0.609375q0 0.46875 0.34375 0.78125q0.34375 0.296875 0.859375 0.296875zm7.3131714 -5.296875q0.765625 0 1.34375 0.390625q0.59375 0.375 0.921875 1.0625q0.328125 0.6875 0.328125 1.609375q0 0.90625 -0.328125 1.59375q-0.328125 0.671875 -0.90625 1.046875q-0.578125 0.359375 -1.359375 0.359375q-0.6875 0 -1.203125 -0.296875q-0.5 -0.296875 -0.765625 -0.84375l0 2.8125q0 0.21875 -0.125 0.34375q-0.125 0.125 -0.359375 0.125q-0.234375 0 -0.359375 -0.140625q-0.125 -0.125 -0.125 -0.328125l0 -7.234375q0 -0.21875 0.125 -0.34375q0.125 -0.140625 0.359375 -0.140625q0.234375 0 0.359375 0.140625q0.125 0.125 0.125 0.34375l0 0.640625q0.265625 -0.546875 0.765625 -0.84375q0.515625 -0.296875 1.203125 -0.296875zm-0.203125 5.265625q0.859375 0 1.328125 -0.578125q0.46875 -0.578125 0.46875 -1.625q0 -1.0625 -0.46875 -1.65625q-0.46875 -0.59375 -1.328125 -0.59375q-0.84375 0 -1.3125 0.578125q-0.453125 0.578125 -0.453125 1.640625q0 1.0625 0.453125 1.65625q0.46875 0.578125 1.3125 0.578125zm7.20282 -5.265625q1.03125 0 1.546875 0.578125q0.53125 0.578125 0.53125 1.734375l0 3.25q0 0.234375 -0.140625 0.359375q-0.125 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -3.21875q0 -0.78125 -0.328125 -1.15625q-0.3125 -0.375 -1.0 -0.375q-0.8125 0 -1.296875 0.5q-0.46875 0.484375 -0.46875 1.328125l0 2.921875q0 0.234375 -0.125 0.359375q-0.125 0.125 -0.359375 0.125q-0.234375 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -7.625q0 -0.203125 0.125 -0.328125q0.140625 -0.140625 0.359375 -0.140625q0.234375 0 0.359375 0.125q0.125 0.125 0.125 0.34375l0 3.140625q0.28125 -0.53125 0.796875 -0.796875q0.515625 -0.28125 1.1875 -0.28125zm4.331665 6.046875q-0.28125 0 -0.484375 -0.1875q-0.1875 -0.1875 -0.1875 -0.484375q0 -0.296875 0.1875 -0.484375q0.203125 -0.203125 0.484375 -0.203125q0.28125 0 0.46875 0.203125q0.1875 0.1875 0.1875 0.484375q0 0.296875 -0.1875 0.484375q-0.1875 0.1875 -0.46875 0.1875zm5.2167664 -6.046875q0.765625 0 1.34375 0.390625q0.59375 0.375 0.921875 1.0625q0.328125 0.6875 0.328125 1.609375q0 0.90625 -0.328125 1.59375q-0.328125 0.671875 -0.90625 1.046875q-0.578125 0.359375 -1.359375 0.359375q-0.6875 0 -1.203125 -0.296875q-0.5 -0.296875 -0.765625 -0.84375l0 2.8125q0 0.21875 -0.125 
0.34375q-0.125 0.125 -0.359375 0.125q-0.234375 0 -0.359375 -0.140625q-0.125 -0.125 -0.125 -0.328125l0 -7.234375q0 -0.21875 0.125 -0.34375q0.125 -0.140625 0.359375 -0.140625q0.234375 0 0.359375 0.140625q0.125 0.125 0.125 0.34375l0 0.640625q0.265625 -0.546875 0.765625 -0.84375q0.515625 -0.296875 1.203125 -0.296875zm-0.203125 5.265625q0.859375 0 1.328125 -0.578125q0.46875 -0.578125 0.46875 -1.625q0 -1.0625 -0.46875 -1.65625q-0.46875 -0.59375 -1.328125 -0.59375q-0.84375 0 -1.3125 0.578125q-0.453125 0.578125 -0.453125 1.640625q0 1.0625 0.453125 1.65625q0.46875 0.578125 1.3125 0.578125zm8.45282 -4.9375q0.140625 -0.296875 0.421875 -0.296875q0.1875 0 0.328125 0.125q0.140625 0.109375 0.140625 0.296875q0 0.109375 -0.046875 0.1875l-3.375 7.28125q-0.0625 0.125 -0.171875 0.1875q-0.109375 0.078125 -0.234375 0.078125q-0.1875 0 -0.328125 -0.109375q-0.125 -0.109375 -0.125 -0.296875q0 -0.09375 0.046875 -0.1875l0.84375 -1.8125l-2.375 -5.140625q-0.046875 -0.078125 -0.046875 -0.171875q0 -0.1875 0.15625 -0.3125q0.15625 -0.140625 0.359375 -0.140625q0.109375 0 0.21875 0.078125q0.125 0.078125 0.1875 0.203125l2.0 4.5l2.0 -4.46875z" fill-rule="nonzero"/><path fill="#f4cccc" d="m132.49081 319.42978l87.49606 0l0 30.992126l-87.49606 0z" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m132.49081 319.42978l87.49606 0l0 30.992126l-87.49606 0z" fill-rule="evenodd"/><path fill="#000000" d="m163.01448 339.50836q-0.234375 0 -0.375 -0.140625q-0.140625 -0.140625 -0.140625 -0.359375l0 -7.1875l-2.578125 0q-0.21875 0 -0.34375 -0.109375q-0.109375 -0.109375 -0.109375 -0.3125q0 -0.203125 0.109375 -0.296875q0.125 -0.109375 0.34375 -0.109375l6.15625 0q0.21875 0 0.328125 0.109375q0.125 0.09375 0.125 0.296875q0 0.203125 -0.125 0.3125q-0.109375 0.109375 -0.328125 0.109375l-2.578125 0l0 7.1875q0 0.21875 -0.140625 0.359375q-0.125 0.140625 -0.34375 0.140625zm8.160431 0.03125q-1.171875 0 -2.046875 -0.515625q-0.859375 -0.53125 -1.328125 -1.5q-0.46875 -0.984375 -0.46875 -2.296875q0 -1.34375 0.453125 -2.3125q0.46875 -0.984375 1.328125 -1.5q0.875 -0.53125 2.0625 -0.53125q1.1875 0 2.0625 0.53125q0.875 0.515625 1.328125 1.5q0.46875 0.96875 0.46875 2.296875q0 1.3125 -0.46875 2.296875q-0.46875 0.984375 -1.34375 1.515625q-0.859375 0.515625 -2.046875 0.515625zm0 -0.84375q1.34375 0 2.09375 -0.90625q0.75 -0.90625 0.75 -2.578125q0 -1.6875 -0.75 -2.578125q-0.734375 -0.90625 -2.09375 -0.90625q-1.34375 0 -2.09375 0.90625q-0.75 0.90625 -0.75 2.578125q0 1.671875 0.75 2.578125q0.75 0.90625 2.09375 0.90625zm9.214935 0.84375q-1.1875 0 -2.0625 -0.515625q-0.875 -0.53125 -1.359375 -1.5q-0.46875 -0.984375 -0.46875 -2.3125q0 -1.328125 0.46875 -2.296875q0.484375 -0.984375 1.359375 -1.5q0.875 -0.53125 2.0625 -0.53125q0.8125 0 1.515625 0.265625q0.71875 0.25 1.25 0.734375q0.1875 0.1875 0.1875 0.421875q0 0.171875 -0.09375 0.296875q-0.09375 0.125 -0.21875 0.125q-0.15625 0 -0.359375 -0.140625q-0.609375 -0.46875 -1.109375 -0.65625q-0.5 -0.203125 -1.140625 -0.203125q-1.390625 0 -2.140625 0.90625q-0.75 0.90625 -0.75 2.578125q0 1.671875 0.75 2.578125q0.75 0.90625 2.140625 0.90625q0.640625 0 1.140625 -0.1875q0.5 -0.1875 1.109375 -0.671875q0.203125 -0.125 0.359375 -0.125q0.125 0 0.21875 0.125q0.09375 0.109375 0.09375 0.296875q0 0.234375 -0.1875 0.40625q-0.53125 0.484375 -1.25 0.75q-0.703125 0.25 -1.515625 0.25zm8.077179 0q-1.171875 0 -2.046875 -0.515625q-0.859375 -0.53125 -1.328125 -1.5q-0.46875 -0.984375 -0.46875 -2.296875q0 -1.34375 0.453125 -2.3125q0.46875 -0.984375 1.328125 -1.5q0.875 -0.53125 2.0625 
-0.53125q1.1875 0 2.0625 0.53125q0.875 0.515625 1.328125 1.5q0.46875 0.96875 0.46875 2.296875q0 1.3125 -0.46875 2.296875q-0.46875 0.984375 -1.34375 1.515625q-0.859375 0.515625 -2.046875 0.515625zm0 -0.84375q1.34375 0 2.09375 -0.90625q0.75 -0.90625 0.75 -2.578125q0 -1.6875 -0.75 -2.578125q-0.734375 -0.90625 -2.09375 -0.90625q-1.34375 0 -2.09375 0.90625q-0.75 0.90625 -0.75 2.578125q0 1.671875 0.75 2.578125q0.75 0.90625 2.09375 0.90625z" fill-rule="nonzero"/><path fill="#d9ead3" d="m284.12296 319.3983l87.49606 0l0 30.992126l-87.49606 0z" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m284.12296 319.3983l87.49606 0l0 30.992126l-87.49606 0z" fill-rule="evenodd"/><path fill="#000000" d="m314.7006 332.47687q-0.234375 0 -0.375 -0.140625q-0.140625 -0.140625 -0.140625 -0.359375l0 -7.1875l-2.578125 0q-0.21875 0 -0.34375 -0.109375q-0.109375 -0.109375 -0.109375 -0.3125q0 -0.203125 0.109375 -0.296875q0.125 -0.109375 0.34375 -0.109375l6.15625 0q0.21875 0 0.328125 0.109375q0.125 0.09375 0.125 0.296875q0 0.203125 -0.125 0.3125q-0.109375 0.109375 -0.328125 0.109375l-2.578125 0l0 7.1875q0 0.21875 -0.140625 0.359375q-0.125 0.140625 -0.34375 0.140625zm5.113556 0q-0.21875 0 -0.359375 -0.140625q-0.125 -0.140625 -0.125 -0.359375l0 -7.546875q0 -0.21875 0.125 -0.34375q0.140625 -0.125 0.375 -0.125l4.375 0q0.203125 0 0.328125 0.109375q0.125 0.09375 0.125 0.296875q0 0.203125 -0.125 0.3125q-0.125 0.109375 -0.328125 0.109375l-3.90625 0l0 2.90625l3.65625 0q0.21875 0 0.328125 0.109375q0.125 0.109375 0.125 0.3125q0 0.1875 -0.125 0.296875q-0.109375 0.109375 -0.328125 0.109375l-3.65625 0l0 3.453125q0 0.21875 -0.125 0.359375q-0.125 0.140625 -0.359375 0.140625zm6.6840515 -0.0625q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.328125l0 -7.5625q0 -0.234375 0.125 -0.359375q0.140625 -0.140625 0.359375 -0.140625q0.234375 0 0.359375 0.140625q0.140625 0.125 0.140625 0.359375l0 7.171875l3.875 0q0.21875 0 0.328125 0.109375q0.125 0.109375 0.125 0.3125q0 0.203125 -0.125 0.3125q-0.109375 0.109375 -0.328125 0.109375l-4.375 0zm6.3394165 0.0625q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -5.0625q0 -0.234375 0.125 -0.359375q0.140625 -0.125 0.359375 -0.125q0.21875 0 0.34375 0.125q0.140625 0.125 0.140625 0.359375l0 5.0625q0 0.234375 -0.140625 0.359375q-0.125 0.125 -0.34375 0.125zm0 -7.28125q-0.296875 0 -0.484375 -0.171875q-0.171875 -0.171875 -0.171875 -0.453125q0 -0.25 0.171875 -0.421875q0.1875 -0.171875 0.484375 -0.171875q0.28125 0 0.453125 0.171875q0.1875 0.171875 0.1875 0.421875q0 0.28125 -0.1875 0.453125q-0.171875 0.171875 -0.453125 0.171875zm4.987152 6.515625q0.421875 0.03125 0.421875 0.375q0 0.203125 -0.15625 0.3125q-0.140625 0.09375 -0.4375 0.078125l-0.328125 -0.03125q-0.953125 -0.0625 -1.421875 -0.5625q-0.453125 -0.515625 -0.453125 -1.53125l0 -3.015625l-0.796875 0q-0.203125 0 -0.328125 -0.09375q-0.109375 -0.109375 -0.109375 -0.28125q0 -0.171875 0.109375 -0.28125q0.125 -0.109375 0.328125 -0.109375l0.796875 0l0 -1.359375q0 -0.21875 0.125 -0.34375q0.140625 -0.140625 0.375 -0.140625q0.21875 0 0.34375 0.140625q0.140625 0.125 0.140625 0.34375l0 1.359375l1.328125 0q0.1875 0 0.296875 0.109375q0.125 0.109375 0.125 0.28125q0 0.171875 -0.125 0.28125q-0.109375 0.09375 -0.296875 0.09375l-1.328125 0l0 3.0625q0 0.65625 0.265625 0.953125q0.265625 0.296875 0.8125 0.328125l0.3125 0.03125zm5.9081726 -0.65625q0.140625 0 0.234375 0.109375q0.09375 0.109375 0.09375 0.28125q0 0.296875 -0.421875 0.546875q-0.4375 0.25 -0.921875 0.375q-0.46875 0.125 -0.921875 0.125q-1.359375 0 
-2.15625 -0.796875q-0.78125 -0.8125 -0.78125 -2.21875q0 -0.90625 0.34375 -1.59375q0.359375 -0.6875 0.984375 -1.0625q0.640625 -0.390625 1.4375 -0.390625q1.140625 0 1.8125 0.75q0.671875 0.734375 0.671875 2.0q0 0.25 -0.09375 0.359375q-0.09375 0.109375 -0.3125 0.109375l-3.859375 0q0.09375 2.0625 1.953125 2.0625q0.46875 0 0.796875 -0.125q0.34375 -0.125 0.71875 -0.34375q0.3125 -0.1875 0.421875 -0.1875zm-2.09375 -3.875q-0.765625 0 -1.234375 0.484375q-0.46875 0.484375 -0.546875 1.359375l3.390625 0q-0.015625 -0.890625 -0.4375 -1.359375q-0.421875 -0.484375 -1.171875 -0.484375z" fill-rule="nonzero"/><path fill="#000000" d="m303.37402 346.47687q-0.21875 0 -0.359375 -0.140625q-0.125 -0.140625 -0.125 -0.359375l0 -7.546875q0 -0.21875 0.125 -0.34375q0.140625 -0.125 0.375 -0.125l4.375 0q0.203125 0 0.328125 0.109375q0.125 0.09375 0.125 0.296875q0 0.203125 -0.125 0.3125q-0.125 0.109375 -0.328125 0.109375l-3.90625 0l0 2.90625l3.65625 0q0.21875 0 0.328125 0.109375q0.125 0.109375 0.125 0.3125q0 0.1875 -0.125 0.296875q-0.109375 0.109375 -0.328125 0.109375l-3.65625 0l0 3.453125q0 0.21875 -0.125 0.359375q-0.125 0.140625 -0.359375 0.140625zm6.5434265 0q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -7.625q0 -0.21875 0.125 -0.34375q0.140625 -0.125 0.359375 -0.125q0.203125 0 0.34375 0.125q0.140625 0.125 0.140625 0.34375l0 7.625q0 0.234375 -0.140625 0.359375q-0.140625 0.125 -0.34375 0.125zm4.674652 -6.046875q2.203125 0 2.203125 2.296875l0 3.265625q0 0.21875 -0.125 0.359375q-0.125 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.125 -0.140625 -0.125 -0.359375l0 -0.578125q-0.21875 0.515625 -0.6875 0.796875q-0.46875 0.28125 -1.078125 0.28125q-0.5625 0 -1.046875 -0.21875q-0.46875 -0.234375 -0.75 -0.640625q-0.265625 -0.40625 -0.265625 -0.90625q0 -0.65625 0.328125 -1.015625q0.34375 -0.375 1.109375 -0.53125q0.765625 -0.15625 2.125 -0.15625l0.265625 0l0 -0.40625q0 -0.71875 -0.296875 -1.046875q-0.28125 -0.34375 -0.953125 -0.34375q-0.8125 0 -1.65625 0.453125q-0.3125 0.203125 -0.453125 0.203125q-0.140625 0 -0.234375 -0.109375q-0.09375 -0.109375 -0.09375 -0.28125q0 -0.171875 0.09375 -0.296875q0.109375 -0.125 0.328125 -0.25q0.421875 -0.25 0.953125 -0.375q0.546875 -0.140625 1.0625 -0.140625zm-0.390625 5.296875q0.71875 0 1.171875 -0.484375q0.46875 -0.484375 0.46875 -1.25l0 -0.34375l-0.21875 0q-1.046875 0 -1.609375 0.09375q-0.546875 0.078125 -0.78125 0.296875q-0.234375 0.203125 -0.234375 0.609375q0 0.46875 0.34375 0.78125q0.34375 0.296875 0.859375 0.296875zm7.0631714 -0.015625q0.421875 0.03125 0.421875 0.375q0 0.203125 -0.15625 0.3125q-0.140625 0.09375 -0.4375 0.078125l-0.328125 -0.03125q-0.953125 -0.0625 -1.421875 -0.5625q-0.453125 -0.515625 -0.453125 -1.53125l0 -3.015625l-0.796875 0q-0.203125 0 -0.328125 -0.09375q-0.109375 -0.109375 -0.109375 -0.28125q0 -0.171875 0.109375 -0.28125q0.125 -0.109375 0.328125 -0.109375l0.796875 0l0 -1.359375q0 -0.21875 0.125 -0.34375q0.140625 -0.140625 0.375 -0.140625q0.21875 0 0.34375 0.140625q0.140625 0.125 0.140625 0.34375l0 1.359375l1.328125 0q0.1875 0 0.296875 0.109375q0.125 0.109375 0.125 0.28125q0 0.171875 -0.125 0.28125q-0.109375 0.09375 -0.296875 0.09375l-1.328125 0l0 3.0625q0 0.65625 0.265625 0.953125q0.265625 0.296875 0.8125 0.328125l0.3125 0.03125zm4.3300476 -5.28125q0.765625 0 1.34375 0.375q0.59375 0.359375 0.921875 1.046875q0.328125 0.6875 0.328125 1.59375q0 0.90625 -0.328125 1.59375q-0.328125 0.6875 -0.921875 1.078125q-0.578125 0.375 -1.34375 0.375q-0.6875 0 -1.203125 -0.296875q-0.5 -0.296875 -0.765625 -0.84375l0 0.640625q0 0.21875 -0.125 0.359375q-0.125 0.125 -0.359375 
0.125q-0.234375 0 -0.359375 -0.125q-0.125 -0.140625 -0.125 -0.359375l0 -7.625q0 -0.203125 0.125 -0.328125q0.125 -0.140625 0.359375 -0.140625q0.234375 0 0.359375 0.125q0.125 0.125 0.125 0.34375l0 3.203125q0.265625 -0.546875 0.765625 -0.84375q0.515625 -0.296875 1.203125 -0.296875zm-0.203125 5.265625q0.859375 0 1.328125 -0.59375q0.46875 -0.59375 0.46875 -1.65625q0 -1.046875 -0.46875 -1.625q-0.46875 -0.578125 -1.328125 -0.578125q-0.84375 0 -1.3125 0.578125q-0.453125 0.578125 -0.453125 1.640625q0 1.0625 0.453125 1.65625q0.46875 0.578125 1.3125 0.578125zm8.687164 -5.25q0.21875 0 0.34375 0.140625q0.125 0.125 0.125 0.34375l0 5.078125q0 0.203125 -0.125 0.34375q-0.125 0.125 -0.34375 0.125q-0.21875 0 -0.34375 -0.125q-0.125 -0.125 -0.125 -0.328125l0 -0.609375q-0.28125 0.53125 -0.78125 0.8125q-0.5 0.265625 -1.125 0.265625q-1.03125 0 -1.5625 -0.578125q-0.53125 -0.578125 -0.53125 -1.71875l0 -3.265625q0 -0.21875 0.125 -0.34375q0.140625 -0.140625 0.375 -0.140625q0.21875 0 0.34375 0.140625q0.125 0.125 0.125 0.34375l0 3.234375q0 0.78125 0.3125 1.15625q0.3125 0.359375 0.984375 0.359375q0.765625 0 1.234375 -0.5q0.46875 -0.5 0.46875 -1.3125l0 -2.9375q0 -0.21875 0.125 -0.34375q0.140625 -0.140625 0.375 -0.140625zm4.8726807 -1.71875q-0.640625 0.046875 -0.96875 0.40625q-0.3125 0.34375 -0.3125 1.046875l0 0.390625l1.328125 0q0.203125 0 0.3125 0.109375q0.109375 0.109375 0.109375 0.28125q0 0.1875 -0.109375 0.28125q-0.109375 0.09375 -0.3125 0.09375l-1.328125 0l0 4.65625q0 0.234375 -0.140625 0.359375q-0.140625 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.140625 -0.125 -0.140625 -0.359375l0 -4.65625l-0.796875 0q-0.203125 0 -0.328125 -0.09375q-0.109375 -0.109375 -0.109375 -0.28125q0 -0.171875 0.109375 -0.28125q0.125 -0.109375 0.328125 -0.109375l0.796875 0l0 -0.21875q0 -1.078125 0.53125 -1.6875q0.546875 -0.625 1.5625 -0.703125l0.3125 -0.015625q0.3125 -0.03125 0.453125 0.0625q0.140625 0.078125 0.140625 0.296875q0 0.34375 -0.421875 0.390625l-0.3125 0.03125zm3.9360352 0q-0.640625 0.046875 -0.96875 0.40625q-0.3125 0.34375 -0.3125 1.046875l0 0.390625l1.328125 0q0.203125 0 0.3125 0.109375q0.109375 0.109375 0.109375 0.28125q0 0.1875 -0.109375 0.28125q-0.109375 0.09375 -0.3125 0.09375l-1.328125 0l0 4.65625q0 0.234375 -0.140625 0.359375q-0.140625 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.140625 -0.125 -0.140625 -0.359375l0 -4.65625l-0.796875 0q-0.203125 0 -0.328125 -0.09375q-0.109375 -0.109375 -0.109375 -0.28125q0 -0.171875 0.109375 -0.28125q0.125 -0.109375 0.328125 -0.109375l0.796875 0l0 -0.21875q0 -1.078125 0.53125 -1.6875q0.546875 -0.625 1.5625 -0.703125l0.3125 -0.015625q0.3125 -0.03125 0.453125 0.0625q0.140625 0.078125 0.140625 0.296875q0 0.34375 -0.421875 0.390625l-0.3125 0.03125zm5.873535 6.328125q0.140625 0 0.234375 0.109375q0.09375 0.109375 0.09375 0.28125q0 0.296875 -0.421875 0.546875q-0.4375 0.25 -0.921875 0.375q-0.46875 0.125 -0.921875 0.125q-1.359375 0 -2.15625 -0.796875q-0.78125 -0.8125 -0.78125 -2.21875q0 -0.90625 0.34375 -1.59375q0.359375 -0.6875 0.984375 -1.0625q0.640625 -0.390625 1.4375 -0.390625q1.140625 0 1.8125 0.75q0.671875 0.734375 0.671875 2.0q0 0.25 -0.09375 0.359375q-0.09375 0.109375 -0.3125 0.109375l-3.859375 0q0.09375 2.0625 1.953125 2.0625q0.46875 0 0.796875 -0.125q0.34375 -0.125 0.71875 -0.34375q0.3125 -0.1875 0.421875 -0.1875zm-2.09375 -3.875q-0.765625 0 -1.234375 0.484375q-0.46875 0.484375 -0.546875 1.359375l3.390625 0q-0.015625 -0.890625 -0.4375 -1.359375q-0.421875 -0.484375 -1.171875 -0.484375zm6.7927856 -0.734375q0.5 -0.03125 0.5 0.40625q0 0.203125 -0.109375 0.3125q-0.109375 
0.109375 -0.375 0.140625l-0.359375 0.03125q-0.796875 0.078125 -1.1875 0.578125q-0.390625 0.484375 -0.390625 1.15625l0 2.921875q0 0.234375 -0.140625 0.359375q-0.125 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -5.0625q0 -0.234375 0.140625 -0.359375q0.140625 -0.125 0.34375 -0.125q0.1875 0 0.3125 0.125q0.140625 0.125 0.140625 0.34375l0 0.671875q0.25 -0.53125 0.71875 -0.796875q0.46875 -0.28125 1.0625 -0.328125l0.171875 -0.015625z" fill-rule="nonzero"/><path fill="#f4cccc" d="m413.02625 319.3983l87.49606 0l0 30.992126l-87.49606 0z" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m413.02625 319.3983l87.49606 0l0 30.992126l-87.49606 0z" fill-rule="evenodd"/><path fill="#000000" d="m443.6039 332.47687q-0.234375 0 -0.375 -0.140625q-0.140625 -0.140625 -0.140625 -0.359375l0 -7.1875l-2.578125 0q-0.21875 0 -0.34375 -0.109375q-0.109375 -0.109375 -0.109375 -0.3125q0 -0.203125 0.109375 -0.296875q0.125 -0.109375 0.34375 -0.109375l6.15625 0q0.21875 0 0.328125 0.109375q0.125 0.09375 0.125 0.296875q0 0.203125 -0.125 0.3125q-0.109375 0.109375 -0.328125 0.109375l-2.578125 0l0 7.1875q0 0.21875 -0.140625 0.359375q-0.125 0.140625 -0.34375 0.140625zm5.113556 0q-0.21875 0 -0.359375 -0.140625q-0.125 -0.140625 -0.125 -0.359375l0 -7.546875q0 -0.21875 0.125 -0.34375q0.140625 -0.125 0.375 -0.125l4.375 0q0.203125 0 0.328125 0.109375q0.125 0.09375 0.125 0.296875q0 0.203125 -0.125 0.3125q-0.125 0.109375 -0.328125 0.109375l-3.90625 0l0 2.90625l3.65625 0q0.21875 0 0.328125 0.109375q0.125 0.109375 0.125 0.3125q0 0.1875 -0.125 0.296875q-0.109375 0.109375 -0.328125 0.109375l-3.65625 0l0 3.453125q0 0.21875 -0.125 0.359375q-0.125 0.140625 -0.359375 0.140625zm6.6840515 -0.0625q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.328125l0 -7.5625q0 -0.234375 0.125 -0.359375q0.140625 -0.140625 0.359375 -0.140625q0.234375 0 0.359375 0.140625q0.140625 0.125 0.140625 0.359375l0 7.171875l3.875 0q0.21875 0 0.328125 0.109375q0.125 0.109375 0.125 0.3125q0 0.203125 -0.125 0.3125q-0.109375 0.109375 -0.328125 0.109375l-4.375 0zm6.3394165 0.0625q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -5.0625q0 -0.234375 0.125 -0.359375q0.140625 -0.125 0.359375 -0.125q0.21875 0 0.34375 0.125q0.140625 0.125 0.140625 0.359375l0 5.0625q0 0.234375 -0.140625 0.359375q-0.125 0.125 -0.34375 0.125zm0 -7.28125q-0.296875 0 -0.484375 -0.171875q-0.171875 -0.171875 -0.171875 -0.453125q0 -0.25 0.171875 -0.421875q0.1875 -0.171875 0.484375 -0.171875q0.28125 0 0.453125 0.171875q0.1875 0.171875 0.1875 0.421875q0 0.28125 -0.1875 0.453125q-0.171875 0.171875 -0.453125 0.171875zm4.987152 6.515625q0.421875 0.03125 0.421875 0.375q0 0.203125 -0.15625 0.3125q-0.140625 0.09375 -0.4375 0.078125l-0.328125 -0.03125q-0.953125 -0.0625 -1.421875 -0.5625q-0.453125 -0.515625 -0.453125 -1.53125l0 -3.015625l-0.796875 0q-0.203125 0 -0.328125 -0.09375q-0.109375 -0.109375 -0.109375 -0.28125q0 -0.171875 0.109375 -0.28125q0.125 -0.109375 0.328125 -0.109375l0.796875 0l0 -1.359375q0 -0.21875 0.125 -0.34375q0.140625 -0.140625 0.375 -0.140625q0.21875 0 0.34375 0.140625q0.140625 0.125 0.140625 0.34375l0 1.359375l1.328125 0q0.1875 0 0.296875 0.109375q0.125 0.109375 0.125 0.28125q0 0.171875 -0.125 0.28125q-0.109375 0.09375 -0.296875 0.09375l-1.328125 0l0 3.0625q0 0.65625 0.265625 0.953125q0.265625 0.296875 0.8125 0.328125l0.3125 0.03125zm5.908142 -0.65625q0.140625 0 0.234375 0.109375q0.09375 0.109375 0.09375 0.28125q0 0.296875 -0.421875 0.546875q-0.4375 0.25 -0.921875 0.375q-0.46875 0.125 
-0.921875 0.125q-1.359375 0 -2.15625 -0.796875q-0.78125 -0.8125 -0.78125 -2.21875q0 -0.90625 0.34375 -1.59375q0.359375 -0.6875 0.984375 -1.0625q0.640625 -0.390625 1.4375 -0.390625q1.140625 0 1.8125 0.75q0.671875 0.734375 0.671875 2.0q0 0.25 -0.09375 0.359375q-0.09375 0.109375 -0.3125 0.109375l-3.859375 0q0.09375 2.0625 1.953125 2.0625q0.46875 0 0.796875 -0.125q0.34375 -0.125 0.71875 -0.34375q0.3125 -0.1875 0.421875 -0.1875zm-2.09375 -3.875q-0.765625 0 -1.234375 0.484375q-0.46875 0.484375 -0.546875 1.359375l3.390625 0q-0.015625 -0.890625 -0.4375 -1.359375q-0.421875 -0.484375 -1.171875 -0.484375z" fill-rule="nonzero"/><path fill="#000000" d="m429.9527 346.47687q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -5.0625q0 -0.234375 0.125 -0.359375q0.140625 -0.125 0.359375 -0.125q0.21875 0 0.34375 0.125q0.140625 0.125 0.140625 0.359375l0 5.0625q0 0.234375 -0.140625 0.359375q-0.125 0.125 -0.34375 0.125zm0 -7.28125q-0.296875 0 -0.484375 -0.171875q-0.171875 -0.171875 -0.171875 -0.453125q0 -0.25 0.171875 -0.421875q0.1875 -0.171875 0.484375 -0.171875q0.28125 0 0.453125 0.171875q0.1875 0.171875 0.1875 0.421875q0 0.28125 -0.1875 0.453125q-0.171875 0.171875 -0.453125 0.171875zm5.237152 1.234375q2.09375 0 2.09375 2.3125l0 3.25q0 0.234375 -0.125 0.359375q-0.125 0.125 -0.359375 0.125q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -3.1875q0 -0.8125 -0.328125 -1.1875q-0.3125 -0.375 -1.0 -0.375q-0.8125 0 -1.296875 0.5q-0.46875 0.484375 -0.46875 1.328125l0 2.921875q0 0.234375 -0.125 0.359375q-0.125 0.125 -0.359375 0.125q-0.234375 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -5.0625q0 -0.21875 0.125 -0.34375q0.125 -0.140625 0.359375 -0.140625q0.21875 0 0.34375 0.140625q0.125 0.125 0.125 0.328125l0 0.609375q0.28125 -0.53125 0.796875 -0.8125q0.53125 -0.28125 1.1875 -0.28125zm6.56604 5.28125q0.421875 0.03125 0.421875 0.375q0 0.203125 -0.15625 0.3125q-0.140625 0.09375 -0.4375 0.078125l-0.328125 -0.03125q-0.953125 -0.0625 -1.421875 -0.5625q-0.453125 -0.515625 -0.453125 -1.53125l0 -3.015625l-0.796875 0q-0.203125 0 -0.328125 -0.09375q-0.109375 -0.109375 -0.109375 -0.28125q0 -0.171875 0.109375 -0.28125q0.125 -0.109375 0.328125 -0.109375l0.796875 0l0 -1.359375q0 -0.21875 0.125 -0.34375q0.140625 -0.140625 0.375 -0.140625q0.21875 0 0.34375 0.140625q0.140625 0.125 0.140625 0.34375l0 1.359375l1.328125 0q0.1875 0 0.296875 0.109375q0.125 0.109375 0.125 0.28125q0 0.171875 -0.125 0.28125q-0.109375 0.09375 -0.296875 0.09375l-1.328125 0l0 3.0625q0 0.65625 0.265625 0.953125q0.265625 0.296875 0.8125 0.328125l0.3125 0.03125zm5.9081726 -0.65625q0.140625 0 0.234375 0.109375q0.09375 0.109375 0.09375 0.28125q0 0.296875 -0.421875 0.546875q-0.4375 0.25 -0.921875 0.375q-0.46875 0.125 -0.921875 0.125q-1.359375 0 -2.15625 -0.796875q-0.78125 -0.8125 -0.78125 -2.21875q0 -0.90625 0.34375 -1.59375q0.359375 -0.6875 0.984375 -1.0625q0.640625 -0.390625 1.4375 -0.390625q1.140625 0 1.8125 0.75q0.671875 0.734375 0.671875 2.0q0 0.25 -0.09375 0.359375q-0.09375 0.109375 -0.3125 0.109375l-3.859375 0q0.09375 2.0625 1.953125 2.0625q0.46875 0 0.796875 -0.125q0.34375 -0.125 0.71875 -0.34375q0.3125 -0.1875 0.421875 -0.1875zm-2.09375 -3.875q-0.765625 0 -1.234375 0.484375q-0.46875 0.484375 -0.546875 1.359375l3.390625 0q-0.015625 -0.890625 -0.4375 -1.359375q-0.421875 -0.484375 -1.171875 -0.484375zm6.7927856 -0.734375q0.5 -0.03125 0.5 0.40625q0 0.203125 -0.109375 0.3125q-0.109375 0.109375 -0.375 0.140625l-0.359375 0.03125q-0.796875 0.078125 -1.1875 0.578125q-0.390625 0.484375 -0.390625 1.15625l0 2.921875q0 0.234375 -0.140625 
0.359375q-0.125 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -5.0625q0 -0.234375 0.140625 -0.359375q0.140625 -0.125 0.34375 -0.125q0.1875 0 0.3125 0.125q0.140625 0.125 0.140625 0.34375l0 0.671875q0.25 -0.53125 0.71875 -0.796875q0.46875 -0.28125 1.0625 -0.328125l0.171875 -0.015625zm4.282898 -0.015625q0.765625 0 1.34375 0.390625q0.59375 0.375 0.921875 1.0625q0.328125 0.6875 0.328125 1.609375q0 0.90625 -0.328125 1.59375q-0.328125 0.671875 -0.90625 1.046875q-0.578125 0.359375 -1.359375 0.359375q-0.6875 0 -1.203125 -0.296875q-0.5 -0.296875 -0.765625 -0.84375l0 2.8125q0 0.21875 -0.125 0.34375q-0.125 0.125 -0.359375 0.125q-0.234375 0 -0.359375 -0.140625q-0.125 -0.125 -0.125 -0.328125l0 -7.234375q0 -0.21875 0.125 -0.34375q0.125 -0.140625 0.359375 -0.140625q0.234375 0 0.359375 0.140625q0.125 0.125 0.125 0.34375l0 0.640625q0.265625 -0.546875 0.765625 -0.84375q0.515625 -0.296875 1.203125 -0.296875zm-0.203125 5.265625q0.859375 0 1.328125 -0.578125q0.46875 -0.578125 0.46875 -1.625q0 -1.0625 -0.46875 -1.65625q-0.46875 -0.59375 -1.328125 -0.59375q-0.84375 0 -1.3125 0.578125q-0.453125 0.578125 -0.453125 1.640625q0 1.0625 0.453125 1.65625q0.46875 0.578125 1.3125 0.578125zm7.14032 -5.25q0.5 -0.03125 0.5 0.40625q0 0.203125 -0.109375 0.3125q-0.109375 0.109375 -0.375 0.140625l-0.359375 0.03125q-0.796875 0.078125 -1.1875 0.578125q-0.390625 0.484375 -0.390625 1.15625l0 2.921875q0 0.234375 -0.140625 0.359375q-0.125 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -5.0625q0 -0.234375 0.140625 -0.359375q0.140625 -0.125 0.34375 -0.125q0.1875 0 0.3125 0.125q0.140625 0.125 0.140625 0.34375l0 0.671875q0.25 -0.53125 0.71875 -0.796875q0.46875 -0.28125 1.0625 -0.328125l0.171875 -0.015625zm5.861023 4.609375q0.140625 0 0.234375 0.109375q0.09375 0.109375 0.09375 0.28125q0 0.296875 -0.421875 0.546875q-0.4375 0.25 -0.921875 0.375q-0.46875 0.125 -0.921875 0.125q-1.359375 0 -2.15625 -0.796875q-0.78125 -0.8125 -0.78125 -2.21875q0 -0.90625 0.34375 -1.59375q0.359375 -0.6875 0.984375 -1.0625q0.640625 -0.390625 1.4375 -0.390625q1.140625 0 1.8125 0.75q0.671875 0.734375 0.671875 2.0q0 0.25 -0.09375 0.359375q-0.09375 0.109375 -0.3125 0.109375l-3.859375 0q0.09375 2.0625 1.953125 2.0625q0.46875 0 0.796875 -0.125q0.34375 -0.125 0.71875 -0.34375q0.3125 -0.1875 0.421875 -0.1875zm-2.09375 -3.875q-0.765625 0 -1.234375 0.484375q-0.46875 0.484375 -0.546875 1.359375l3.390625 0q-0.015625 -0.890625 -0.4375 -1.359375q-0.421875 -0.484375 -1.171875 -0.484375zm6.5896606 4.53125q0.421875 0.03125 0.421875 0.375q0 0.203125 -0.15625 0.3125q-0.140625 0.09375 -0.4375 0.078125l-0.328125 -0.03125q-0.953125 -0.0625 -1.421875 -0.5625q-0.453125 -0.515625 -0.453125 -1.53125l0 -3.015625l-0.796875 0q-0.203125 0 -0.328125 -0.09375q-0.109375 -0.109375 -0.109375 -0.28125q0 -0.171875 0.109375 -0.28125q0.125 -0.109375 0.328125 -0.109375l0.796875 0l0 -1.359375q0 -0.21875 0.125 -0.34375q0.140625 -0.140625 0.375 -0.140625q0.21875 0 0.34375 0.140625q0.140625 0.125 0.140625 0.34375l0 1.359375l1.328125 0q0.1875 0 0.296875 0.109375q0.125 0.109375 0.125 0.28125q0 0.171875 -0.125 0.28125q-0.109375 0.09375 -0.296875 0.09375l-1.328125 0l0 3.0625q0 0.65625 0.265625 0.953125q0.265625 0.296875 0.8125 0.328125l0.3125 0.03125zm5.9081726 -0.65625q0.140625 0 0.234375 0.109375q0.09375 0.109375 0.09375 0.28125q0 0.296875 -0.421875 0.546875q-0.4375 0.25 -0.921875 0.375q-0.46875 0.125 -0.921875 0.125q-1.359375 0 -2.15625 -0.796875q-0.78125 -0.8125 -0.78125 -2.21875q0 -0.90625 0.34375 -1.59375q0.359375 -0.6875 0.984375 
-1.0625q0.640625 -0.390625 1.4375 -0.390625q1.140625 0 1.8125 0.75q0.671875 0.734375 0.671875 2.0q0 0.25 -0.09375 0.359375q-0.09375 0.109375 -0.3125 0.109375l-3.859375 0q0.09375 2.0625 1.953125 2.0625q0.46875 0 0.796875 -0.125q0.34375 -0.125 0.71875 -0.34375q0.3125 -0.1875 0.421875 -0.1875zm-2.09375 -3.875q-0.765625 0 -1.234375 0.484375q-0.46875 0.484375 -0.546875 1.359375l3.390625 0q-0.015625 -0.890625 -0.4375 -1.359375q-0.421875 -0.484375 -1.171875 -0.484375zm6.7927856 -0.734375q0.5 -0.03125 0.5 0.40625q0 0.203125 -0.109375 0.3125q-0.109375 0.109375 -0.375 0.140625l-0.359375 0.03125q-0.796875 0.078125 -1.1875 0.578125q-0.390625 0.484375 -0.390625 1.15625l0 2.921875q0 0.234375 -0.140625 0.359375q-0.125 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -5.0625q0 -0.234375 0.140625 -0.359375q0.140625 -0.125 0.34375 -0.125q0.1875 0 0.3125 0.125q0.140625 0.125 0.140625 0.34375l0 0.671875q0.25 -0.53125 0.71875 -0.796875q0.46875 -0.28125 1.0625 -0.328125l0.171875 -0.015625z" fill-rule="nonzero"/><path fill="#000000" fill-opacity="0.0" d="m371.61902 334.89435l41.417297 0" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m371.61902 334.89435l37.990234 0" fill-rule="evenodd"/><path fill="#000000" stroke="#000000" stroke-width="1.0" stroke-linecap="butt" d="m409.60925 334.89435l-1.1245728 1.1246033l3.0897522 -1.1246033l-3.0897522 -1.1245728z" fill-rule="evenodd"/><path fill="#c9daf8" d="m548.5407 277.52954l87.49603 0l0 30.992126l-87.49603 0z" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m548.5407 277.52954l87.49603 0l0 30.992126l-87.49603 0z" fill-rule="evenodd"/><path fill="#000000" d="m587.0588 293.13934q0.1875 0 0.296875 0.109375q0.109375 0.109375 0.109375 0.296875l0 2.984375q0 0.296875 -0.09375 0.4375q-0.078125 0.140625 -0.328125 0.234375q-0.46875 0.203125 -1.15625 0.328125q-0.6875 0.109375 -1.375 0.109375q-1.25 0 -2.171875 -0.515625q-0.90625 -0.515625 -1.390625 -1.484375q-0.484375 -0.96875 -0.484375 -2.328125q0 -1.328125 0.46875 -2.296875q0.484375 -0.984375 1.375 -1.5q0.90625 -0.53125 2.125 -0.53125q0.84375 0 1.5625 0.265625q0.71875 0.25 1.203125 0.734375q0.21875 0.203125 0.21875 0.421875q0 0.171875 -0.109375 0.296875q-0.09375 0.125 -0.234375 0.125q-0.140625 0 -0.328125 -0.140625q-0.625 -0.484375 -1.140625 -0.671875q-0.5 -0.1875 -1.15625 -0.1875q-1.4375 0 -2.203125 0.90625q-0.75 0.890625 -0.75 2.578125q0 1.71875 0.765625 2.609375q0.78125 0.890625 2.28125 0.890625q1.109375 0 2.03125 -0.328125l0 -2.578125l-1.75 0q-0.203125 0 -0.328125 -0.109375q-0.125 -0.109375 -0.125 -0.265625q0 -0.1875 0.125 -0.28125q0.125 -0.109375 0.328125 -0.109375l2.234375 0zm2.8911743 4.46875q-0.21875 0 -0.359375 -0.140625q-0.125 -0.140625 -0.125 -0.359375l0 -7.546875q0 -0.21875 0.125 -0.34375q0.140625 -0.125 0.375 -0.125l2.84375 0q1.328125 0 2.0625 0.65625q0.75 0.640625 0.75 1.828125q0 1.1875 -0.75 1.84375q-0.734375 0.65625 -2.0625 0.65625l-2.359375 0l0 3.03125q0 0.21875 -0.140625 0.359375q-0.125 0.140625 -0.359375 0.140625zm2.765625 -4.34375q1.9375 0 1.9375 -1.6875q0 -1.671875 -1.9375 -1.671875l-2.265625 0l0 3.359375l2.265625 0zm7.7869263 4.375q-1.65625 0 -2.515625 -0.859375q-0.84375 -0.859375 -0.84375 -2.546875l0 -4.703125q0 -0.234375 0.125 -0.359375q0.140625 -0.140625 0.359375 -0.140625q0.21875 0 0.34375 0.140625q0.140625 0.125 0.140625 0.359375l0 4.78125q0 1.25 0.609375 1.875q0.609375 0.609375 1.78125 0.609375q1.171875 0 1.765625 -0.609375q0.609375 -0.625 
0.609375 -1.875l0 -4.78125q0 -0.234375 0.140625 -0.359375q0.140625 -0.140625 0.359375 -0.140625q0.21875 0 0.34375 0.140625q0.140625 0.125 0.140625 0.359375l0 4.703125q0 1.671875 -0.859375 2.546875q-0.859375 0.859375 -2.5 0.859375z" fill-rule="nonzero"/><path fill="#c9daf8" d="m548.5407 319.3983l87.49603 0l0 30.992126l-87.49603 0z" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m548.5407 319.3983l87.49603 0l0 30.992126l-87.49603 0z" fill-rule="evenodd"/><path fill="#000000" d="m584.63763 339.50812q-1.1875 0 -2.0625 -0.515625q-0.875 -0.53125 -1.359375 -1.5q-0.46875 -0.984375 -0.46875 -2.3125q0 -1.328125 0.46875 -2.296875q0.484375 -0.984375 1.359375 -1.5q0.875 -0.53125 2.0625 -0.53125q0.8125 0 1.515625 0.265625q0.71875 0.25 1.25 0.734375q0.1875 0.1875 0.1875 0.421875q0 0.171875 -0.09375 0.296875q-0.09375 0.125 -0.21875 0.125q-0.15625 0 -0.359375 -0.140625q-0.609375 -0.46875 -1.109375 -0.65625q-0.5 -0.203125 -1.140625 -0.203125q-1.390625 0 -2.140625 0.90625q-0.75 0.90625 -0.75 2.578125q0 1.671875 0.75 2.578125q0.75 0.90625 2.140625 0.90625q0.640625 0 1.140625 -0.1875q0.5 -0.1875 1.109375 -0.671875q0.203125 -0.125 0.359375 -0.125q0.125 0 0.21875 0.125q0.09375 0.109375 0.09375 0.296875q0 0.234375 -0.1875 0.40625q-0.53125 0.484375 -1.25 0.75q-0.703125 0.25 -1.515625 0.25zm5.0302734 -0.03125q-0.21875 0 -0.359375 -0.140625q-0.125 -0.140625 -0.125 -0.359375l0 -7.546875q0 -0.21875 0.125 -0.34375q0.140625 -0.125 0.375 -0.125l2.84375 0q1.328125 0 2.0625 0.65625q0.75 0.640625 0.75 1.828125q0 1.1875 -0.75 1.84375q-0.734375 0.65625 -2.0625 0.65625l-2.359375 0l0 3.03125q0 0.21875 -0.140625 0.359375q-0.125 0.140625 -0.359375 0.140625zm2.765625 -4.34375q1.9375 0 1.9375 -1.6875q0 -1.671875 -1.9375 -1.671875l-2.265625 0l0 3.359375l2.265625 0zm7.7869263 4.375q-1.65625 0 -2.515625 -0.859375q-0.84375 -0.859375 -0.84375 -2.546875l0 -4.703125q0 -0.234375 0.125 -0.359375q0.140625 -0.140625 0.359375 -0.140625q0.21875 0 0.34375 0.140625q0.140625 0.125 0.140625 0.359375l0 4.78125q0 1.25 0.609375 1.875q0.609375 0.609375 1.78125 0.609375q1.171875 0 1.765625 -0.609375q0.609375 -0.625 0.609375 -1.875l0 -4.78125q0 -0.234375 0.140625 -0.359375q0.140625 -0.140625 0.359375 -0.140625q0.21875 0 0.34375 0.140625q0.140625 0.125 0.140625 0.359375l0 4.703125q0 1.671875 -0.859375 2.546875q-0.859375 0.859375 -2.5 0.859375z" fill-rule="nonzero"/><path fill="#000000" fill-opacity="0.0" d="m219.98688 334.92584l64.12598 -0.03149414" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m219.98688 334.92584l60.698914 -0.029815674" fill-rule="evenodd"/><path fill="#000000" stroke="#000000" stroke-width="1.0" stroke-linecap="butt" d="m280.68576 334.89603l-1.1240234 1.1251526l3.0892334 -1.1260986l-3.090332 -1.1230774z" fill-rule="evenodd"/><path fill="#d9ead3" d="m413.02625 141.28871l20.53543 0l0 20.53543l-20.53543 0z" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m413.02625 141.28871l20.53543 0l0 20.53543l-20.53543 0z" fill-rule="evenodd"/><path fill="#000000" fill-opacity="0.0" d="m437.52493 135.68242l73.763794 0l0 31.748032l-73.763794 0z" fill-rule="evenodd"/><path fill="#000000" d="m448.0718 156.20241q-0.234375 0 -0.375 -0.125q-0.125 -0.140625 -0.125 -0.359375l0 -7.5q0 -0.21875 0.125 -0.34375q0.140625 -0.125 0.375 -0.125l2.34375 0q2.03125 0 3.140625 1.09375q1.109375 1.09375 1.109375 3.125q0 2.03125 -1.125 3.140625q-1.109375 1.09375 -3.125 
1.09375l-2.34375 0zm2.28125 -0.84375q3.28125 0 3.28125 -3.390625q0 -3.390625 -3.28125 -3.390625l-1.796875 0l0 6.78125l1.796875 0zm8.3211975 -5.140625q2.203125 0 2.203125 2.296875l0 3.265625q0 0.21875 -0.125 0.359375q-0.125 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.125 -0.140625 -0.125 -0.359375l0 -0.578125q-0.21875 0.515625 -0.6875 0.796875q-0.46875 0.28125 -1.078125 0.28125q-0.5625 0 -1.046875 -0.21875q-0.46875 -0.234375 -0.75 -0.640625q-0.265625 -0.40625 -0.265625 -0.90625q0 -0.65625 0.328125 -1.015625q0.34375 -0.375 1.109375 -0.53125q0.765625 -0.15625 2.125 -0.15625l0.265625 0l0 -0.40625q0 -0.71875 -0.296875 -1.046875q-0.28125 -0.34375 -0.953125 -0.34375q-0.8125 0 -1.65625 0.453125q-0.3125 0.203125 -0.453125 0.203125q-0.140625 0 -0.234375 -0.109375q-0.09375 -0.109375 -0.09375 -0.28125q0 -0.171875 0.09375 -0.296875q0.109375 -0.125 0.328125 -0.25q0.421875 -0.25 0.953125 -0.375q0.546875 -0.140625 1.0625 -0.140625zm-0.390625 5.296875q0.71875 0 1.171875 -0.484375q0.46875 -0.484375 0.46875 -1.25l0 -0.34375l-0.21875 0q-1.046875 0 -1.609375 0.09375q-0.546875 0.078125 -0.78125 0.296875q-0.234375 0.203125 -0.234375 0.609375q0 0.46875 0.34375 0.78125q0.34375 0.296875 0.859375 0.296875zm7.0631714 -0.015625q0.421875 0.03125 0.421875 0.375q0 0.203125 -0.15625 0.3125q-0.140625 0.09375 -0.4375 0.078125l-0.328125 -0.03125q-0.953125 -0.0625 -1.421875 -0.5625q-0.453125 -0.515625 -0.453125 -1.53125l0 -3.015625l-0.796875 0q-0.203125 0 -0.328125 -0.09375q-0.109375 -0.109375 -0.109375 -0.28125q0 -0.171875 0.109375 -0.28125q0.125 -0.109375 0.328125 -0.109375l0.796875 0l0 -1.359375q0 -0.21875 0.125 -0.34375q0.140625 -0.140625 0.375 -0.140625q0.21875 0 0.34375 0.140625q0.140625 0.125 0.140625 0.34375l0 1.359375l1.328125 0q0.1875 0 0.296875 0.109375q0.125 0.109375 0.125 0.28125q0 0.171875 -0.125 0.28125q-0.109375 0.09375 -0.296875 0.09375l-1.328125 0l0 3.0625q0 0.65625 0.265625 0.953125q0.265625 0.296875 0.8125 0.328125l0.3125 0.03125zm3.767517 -5.28125q2.203125 0 2.203125 2.296875l0 3.265625q0 0.21875 -0.125 0.359375q-0.125 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.125 -0.140625 -0.125 -0.359375l0 -0.578125q-0.21875 0.515625 -0.6875 0.796875q-0.46875 0.28125 -1.078125 0.28125q-0.5625 0 -1.046875 -0.21875q-0.46875 -0.234375 -0.75 -0.640625q-0.265625 -0.40625 -0.265625 -0.90625q0 -0.65625 0.328125 -1.015625q0.34375 -0.375 1.109375 -0.53125q0.765625 -0.15625 2.125 -0.15625l0.265625 0l0 -0.40625q0 -0.71875 -0.296875 -1.046875q-0.28125 -0.34375 -0.953125 -0.34375q-0.8125 0 -1.65625 0.453125q-0.3125 0.203125 -0.453125 0.203125q-0.140625 0 -0.234375 -0.109375q-0.09375 -0.109375 -0.09375 -0.28125q0 -0.171875 0.09375 -0.296875q0.109375 -0.125 0.328125 -0.25q0.421875 -0.25 0.953125 -0.375q0.546875 -0.140625 1.0625 -0.140625zm-0.390625 5.296875q0.71875 0 1.171875 -0.484375q0.46875 -0.484375 0.46875 -1.25l0 -0.34375l-0.21875 0q-1.046875 0 -1.609375 0.09375q-0.546875 0.078125 -0.78125 0.296875q-0.234375 0.203125 -0.234375 0.609375q0 0.46875 0.34375 0.78125q0.34375 0.296875 0.859375 0.296875zm10.15921 0.75q-0.234375 0 -0.375 -0.140625q-0.140625 -0.140625 -0.140625 -0.359375l0 -7.1875l-2.578125 0q-0.21875 0 -0.34375 -0.109375q-0.109375 -0.109375 -0.109375 -0.3125q0 -0.203125 0.109375 -0.296875q0.125 -0.109375 0.34375 -0.109375l6.15625 0q0.21875 0 0.328125 0.109375q0.125 0.09375 0.125 0.296875q0 0.203125 -0.125 0.3125q-0.109375 0.109375 -0.328125 0.109375l-2.578125 0l0 7.1875q0 0.21875 -0.140625 0.359375q-0.125 0.140625 -0.34375 0.140625zm8.691681 -5.71875q0.140625 -0.296875 0.421875 -0.296875q0.1875 0 
0.328125 0.125q0.140625 0.109375 0.140625 0.296875q0 0.109375 -0.046875 0.1875l-3.375 7.28125q-0.0625 0.125 -0.171875 0.1875q-0.109375 0.078125 -0.234375 0.078125q-0.1875 0 -0.328125 -0.109375q-0.125 -0.109375 -0.125 -0.296875q0 -0.09375 0.046875 -0.1875l0.84375 -1.8125l-2.375 -5.140625q-0.046875 -0.078125 -0.046875 -0.171875q0 -0.1875 0.15625 -0.3125q0.15625 -0.140625 0.359375 -0.140625q0.109375 0 0.21875 0.078125q0.125 0.078125 0.1875 0.203125l2.0 4.5l2.0 -4.46875zm4.902405 -0.328125q0.765625 0 1.34375 0.390625q0.59375 0.375 0.921875 1.0625q0.328125 0.6875 0.328125 1.609375q0 0.90625 -0.328125 1.59375q-0.328125 0.671875 -0.90625 1.046875q-0.578125 0.359375 -1.359375 0.359375q-0.6875 0 -1.203125 -0.296875q-0.5 -0.296875 -0.765625 -0.84375l0 2.8125q0 0.21875 -0.125 0.34375q-0.125 0.125 -0.359375 0.125q-0.234375 0 -0.359375 -0.140625q-0.125 -0.125 -0.125 -0.328125l0 -7.234375q0 -0.21875 0.125 -0.34375q0.125 -0.140625 0.359375 -0.140625q0.234375 0 0.359375 0.140625q0.125 0.125 0.125 0.34375l0 0.640625q0.265625 -0.546875 0.765625 -0.84375q0.515625 -0.296875 1.203125 -0.296875zm-0.203125 5.265625q0.859375 0 1.328125 -0.578125q0.46875 -0.578125 0.46875 -1.625q0 -1.0625 -0.46875 -1.65625q-0.46875 -0.59375 -1.328125 -0.59375q-0.84375 0 -1.3125 0.578125q-0.453125 0.578125 -0.453125 1.640625q0 1.0625 0.453125 1.65625q0.46875 0.578125 1.3125 0.578125zm8.76532 -0.640625q0.140625 0 0.234375 0.109375q0.09375 0.109375 0.09375 0.28125q0 0.296875 -0.421875 0.546875q-0.4375 0.25 -0.921875 0.375q-0.46875 0.125 -0.921875 0.125q-1.359375 0 -2.15625 -0.796875q-0.78125 -0.8125 -0.78125 -2.21875q0 -0.90625 0.34375 -1.59375q0.359375 -0.6875 0.984375 -1.0625q0.640625 -0.390625 1.4375 -0.390625q1.140625 0 1.8125 0.75q0.671875 0.734375 0.671875 2.0q0 0.25 -0.09375 0.359375q-0.09375 0.109375 -0.3125 0.109375l-3.859375 0q0.09375 2.0625 1.953125 2.0625q0.46875 0 0.796875 -0.125q0.34375 -0.125 0.71875 -0.34375q0.3125 -0.1875 0.421875 -0.1875zm-2.09375 -3.875q-0.765625 0 -1.234375 0.484375q-0.46875 0.484375 -0.546875 1.359375l3.390625 0q-0.015625 -0.890625 -0.4375 -1.359375q-0.421875 -0.484375 -1.171875 -0.484375z" fill-rule="nonzero"/><path fill="#f4cccc" d="m519.9029 141.28871l20.5354 0l0 20.53543l-20.5354 0z" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m519.9029 141.28871l20.5354 0l0 20.53543l-20.5354 0z" fill-rule="evenodd"/><path fill="#000000" fill-opacity="0.0" d="m544.40155 135.68242l100.0 0l0 31.748032l-100.0 0z" fill-rule="evenodd"/><path fill="#000000" d="m554.9328 156.26491q-0.21875 0 -0.359375 -0.140625q-0.125 -0.140625 -0.125 -0.359375l0 -7.578125q0 -0.234375 0.125 -0.359375q0.140625 -0.140625 0.359375 -0.140625q0.234375 0 0.359375 0.140625q0.140625 0.125 0.140625 0.359375l0 7.578125q0 0.21875 -0.140625 0.359375q-0.125 0.140625 -0.359375 0.140625zm5.3845215 -6.046875q2.09375 0 2.09375 2.3125l0 3.25q0 0.234375 -0.125 0.359375q-0.125 0.125 -0.359375 0.125q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -3.1875q0 -0.8125 -0.328125 -1.1875q-0.3125 -0.375 -1.0 -0.375q-0.8125 0 -1.296875 0.5q-0.46875 0.484375 -0.46875 1.328125l0 2.921875q0 0.234375 -0.125 0.359375q-0.125 0.125 -0.359375 0.125q-0.234375 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -5.0625q0 -0.21875 0.125 -0.34375q0.125 -0.140625 0.359375 -0.140625q0.21875 0 0.34375 0.140625q0.125 0.125 0.125 0.328125l0 0.609375q0.28125 -0.53125 0.796875 -0.8125q0.53125 -0.28125 1.1875 -0.28125zm6.456726 -1.703125q-0.640625 0.046875 -0.96875 0.40625q-0.3125 0.34375 -0.3125 
1.046875l0 0.390625l1.328125 0q0.203125 0 0.3125 0.109375q0.109375 0.109375 0.109375 0.28125q0 0.1875 -0.109375 0.28125q-0.109375 0.09375 -0.3125 0.09375l-1.328125 0l0 4.65625q0 0.234375 -0.140625 0.359375q-0.140625 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.140625 -0.125 -0.140625 -0.359375l0 -4.65625l-0.796875 0q-0.203125 0 -0.328125 -0.09375q-0.109375 -0.109375 -0.109375 -0.28125q0 -0.171875 0.109375 -0.28125q0.125 -0.109375 0.328125 -0.109375l0.796875 0l0 -0.21875q0 -1.078125 0.53125 -1.6875q0.546875 -0.625 1.5625 -0.703125l0.3125 -0.015625q0.3125 -0.03125 0.453125 0.0625q0.140625 0.078125 0.140625 0.296875q0 0.34375 -0.421875 0.390625l-0.3125 0.03125zm4.248535 1.71875q0.5 -0.03125 0.5 0.40625q0 0.203125 -0.109375 0.3125q-0.109375 0.109375 -0.375 0.140625l-0.359375 0.03125q-0.796875 0.078125 -1.1875 0.578125q-0.390625 0.484375 -0.390625 1.15625l0 2.921875q0 0.234375 -0.140625 0.359375q-0.125 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -5.0625q0 -0.234375 0.140625 -0.359375q0.140625 -0.125 0.34375 -0.125q0.1875 0 0.3125 0.125q0.140625 0.125 0.140625 0.34375l0 0.671875q0.25 -0.53125 0.71875 -0.796875q0.46875 -0.28125 1.0625 -0.328125l0.171875 -0.015625zm3.720398 -0.015625q2.203125 0 2.203125 2.296875l0 3.265625q0 0.21875 -0.125 0.359375q-0.125 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.125 -0.140625 -0.125 -0.359375l0 -0.578125q-0.21875 0.515625 -0.6875 0.796875q-0.46875 0.28125 -1.078125 0.28125q-0.5625 0 -1.046875 -0.21875q-0.46875 -0.234375 -0.75 -0.640625q-0.265625 -0.40625 -0.265625 -0.90625q0 -0.65625 0.328125 -1.015625q0.34375 -0.375 1.109375 -0.53125q0.765625 -0.15625 2.125 -0.15625l0.265625 0l0 -0.40625q0 -0.71875 -0.296875 -1.046875q-0.28125 -0.34375 -0.953125 -0.34375q-0.8125 0 -1.65625 0.453125q-0.3125 0.203125 -0.453125 0.203125q-0.140625 0 -0.234375 -0.109375q-0.09375 -0.109375 -0.09375 -0.28125q0 -0.171875 0.09375 -0.296875q0.109375 -0.125 0.328125 -0.25q0.421875 -0.25 0.953125 -0.375q0.546875 -0.140625 1.0625 -0.140625zm-0.390625 5.296875q0.71875 0 1.171875 -0.484375q0.46875 -0.484375 0.46875 -1.25l0 -0.34375l-0.21875 0q-1.046875 0 -1.609375 0.09375q-0.546875 0.078125 -0.78125 0.296875q-0.234375 0.203125 -0.234375 0.609375q0 0.46875 0.34375 0.78125q0.34375 0.296875 0.859375 0.296875zm6.3444214 0.765625q-0.5625 0 -1.0625 -0.125q-0.5 -0.140625 -0.875 -0.375q-0.21875 -0.140625 -0.3125 -0.265625q-0.078125 -0.125 -0.078125 -0.3125q0 -0.15625 0.078125 -0.25q0.09375 -0.109375 0.234375 -0.109375q0.15625 0 0.421875 0.1875q0.359375 0.21875 0.71875 0.34375q0.359375 0.125 0.875 0.125q0.65625 0 1.015625 -0.21875q0.359375 -0.234375 0.359375 -0.671875q0 -0.265625 -0.140625 -0.421875q-0.125 -0.171875 -0.453125 -0.296875q-0.3125 -0.125 -0.9375 -0.25q-1.0625 -0.234375 -1.515625 -0.609375q-0.453125 -0.390625 -0.453125 -1.046875q0 -0.515625 0.28125 -0.90625q0.28125 -0.40625 0.796875 -0.625q0.515625 -0.234375 1.15625 -0.234375q0.46875 0 0.90625 0.125q0.4375 0.125 0.78125 0.34375q0.40625 0.296875 0.40625 0.609375q0 0.15625 -0.09375 0.265625q-0.09375 0.109375 -0.234375 0.109375q-0.140625 0 -0.4375 -0.203125q-0.328125 -0.21875 -0.625 -0.34375q-0.296875 -0.125 -0.75 -0.125q-0.5625 0 -0.90625 0.265625q-0.34375 0.25 -0.34375 0.671875q0 0.25 0.125 0.421875q0.125 0.15625 0.421875 0.28125q0.296875 0.125 0.84375 0.25q0.828125 0.1875 1.265625 0.40625q0.453125 0.203125 0.640625 0.515625q0.203125 0.3125 0.203125 0.796875q0 0.75 -0.640625 1.21875q-0.640625 0.453125 -1.671875 0.453125zm6.47876 -0.78125q0.421875 0.03125 0.421875 0.375q0 0.203125 
-0.15625 0.3125q-0.140625 0.09375 -0.4375 0.078125l-0.328125 -0.03125q-0.953125 -0.0625 -1.421875 -0.5625q-0.453125 -0.515625 -0.453125 -1.53125l0 -3.015625l-0.796875 0q-0.203125 0 -0.328125 -0.09375q-0.109375 -0.109375 -0.109375 -0.28125q0 -0.171875 0.109375 -0.28125q0.125 -0.109375 0.328125 -0.109375l0.796875 0l0 -1.359375q0 -0.21875 0.125 -0.34375q0.140625 -0.140625 0.375 -0.140625q0.21875 0 0.34375 0.140625q0.140625 0.125 0.140625 0.34375l0 1.359375l1.328125 0q0.1875 0 0.296875 0.109375q0.125 0.109375 0.125 0.28125q0 0.171875 -0.125 0.28125q-0.109375 0.09375 -0.296875 0.09375l-1.328125 0l0 3.0625q0 0.65625 0.265625 0.953125q0.265625 0.296875 0.8125 0.328125l0.3125 0.03125zm4.283142 -5.265625q0.5 -0.03125 0.5 0.40625q0 0.203125 -0.109375 0.3125q-0.109375 0.109375 -0.375 0.140625l-0.359375 0.03125q-0.796875 0.078125 -1.1875 0.578125q-0.390625 0.484375 -0.390625 1.15625l0 2.921875q0 0.234375 -0.140625 0.359375q-0.125 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -5.0625q0 -0.234375 0.140625 -0.359375q0.140625 -0.125 0.34375 -0.125q0.1875 0 0.3125 0.125q0.140625 0.125 0.140625 0.34375l0 0.671875q0.25 -0.53125 0.71875 -0.796875q0.46875 -0.28125 1.0625 -0.328125l0.171875 -0.015625zm5.782898 0q0.21875 0 0.34375 0.140625q0.125 0.125 0.125 0.34375l0 5.078125q0 0.203125 -0.125 0.34375q-0.125 0.125 -0.34375 0.125q-0.21875 0 -0.34375 -0.125q-0.125 -0.125 -0.125 -0.328125l0 -0.609375q-0.28125 0.53125 -0.78125 0.8125q-0.5 0.265625 -1.125 0.265625q-1.03125 0 -1.5625 -0.578125q-0.53125 -0.578125 -0.53125 -1.71875l0 -3.265625q0 -0.21875 0.125 -0.34375q0.140625 -0.140625 0.375 -0.140625q0.21875 0 0.34375 0.140625q0.125 0.125 0.125 0.34375l0 3.234375q0 0.78125 0.3125 1.15625q0.3125 0.359375 0.984375 0.359375q0.765625 0 1.234375 -0.5q0.46875 -0.5 0.46875 -1.3125l0 -2.9375q0 -0.21875 0.125 -0.34375q0.140625 -0.140625 0.375 -0.140625zm4.7008057 6.046875q-0.8125 0 -1.453125 -0.359375q-0.625 -0.375 -0.96875 -1.0625q-0.34375 -0.6875 -0.34375 -1.578125q0 -0.90625 0.359375 -1.59375q0.359375 -0.703125 0.984375 -1.078125q0.640625 -0.390625 1.46875 -0.390625q0.453125 0 0.90625 0.125q0.453125 0.125 0.78125 0.359375q0.21875 0.140625 0.3125 0.28125q0.09375 0.140625 0.09375 0.3125q0 0.171875 -0.09375 0.28125q-0.09375 0.09375 -0.234375 0.09375q-0.078125 0 -0.1875 -0.046875q-0.09375 -0.046875 -0.15625 -0.09375q-0.0625 -0.046875 -0.09375 -0.0625q-0.3125 -0.203125 -0.59375 -0.3125q-0.28125 -0.125 -0.6875 -0.125q-0.875 0 -1.359375 0.59375q-0.484375 0.59375 -0.484375 1.65625q0 1.046875 0.484375 1.625q0.484375 0.578125 1.359375 0.578125q0.40625 0 0.703125 -0.109375q0.296875 -0.125 0.59375 -0.328125q0.140625 -0.09375 0.25 -0.15625q0.125 -0.0625 0.203125 -0.0625q0.140625 0 0.21875 0.109375q0.09375 0.109375 0.09375 0.28125q0 0.15625 -0.09375 0.28125q-0.078125 0.125 -0.296875 0.28125q-0.34375 0.234375 -0.8125 0.375q-0.46875 0.125 -0.953125 0.125zm6.029297 -0.78125q0.421875 0.03125 0.421875 0.375q0 0.203125 -0.15625 0.3125q-0.140625 0.09375 -0.4375 0.078125l-0.328125 -0.03125q-0.953125 -0.0625 -1.421875 -0.5625q-0.453125 -0.515625 -0.453125 -1.53125l0 -3.015625l-0.796875 0q-0.203125 0 -0.328125 -0.09375q-0.109375 -0.109375 -0.109375 -0.28125q0 -0.171875 0.109375 -0.28125q0.125 -0.109375 0.328125 -0.109375l0.796875 0l0 -1.359375q0 -0.21875 0.125 -0.34375q0.140625 -0.140625 0.375 -0.140625q0.21875 0 0.34375 0.140625q0.140625 0.125 0.140625 0.34375l0 1.359375l1.328125 0q0.1875 0 0.296875 0.109375q0.125 0.109375 0.125 0.28125q0 0.171875 -0.125 0.28125q-0.109375 0.09375 -0.296875 0.09375l-1.328125 
0l0 3.0625q0 0.65625 0.265625 0.953125q0.265625 0.296875 0.8125 0.328125l0.3125 0.03125zm5.830017 -5.265625q0.21875 0 0.34375 0.140625q0.125 0.125 0.125 0.34375l0 5.078125q0 0.203125 -0.125 0.34375q-0.125 0.125 -0.34375 0.125q-0.21875 0 -0.34375 -0.125q-0.125 -0.125 -0.125 -0.328125l0 -0.609375q-0.28125 0.53125 -0.78125 0.8125q-0.5 0.265625 -1.125 0.265625q-1.03125 0 -1.5625 -0.578125q-0.53125 -0.578125 -0.53125 -1.71875l0 -3.265625q0 -0.21875 0.125 -0.34375q0.140625 -0.140625 0.375 -0.140625q0.21875 0 0.34375 0.140625q0.125 0.125 0.125 0.34375l0 3.234375q0 0.78125 0.3125 1.15625q0.3125 0.359375 0.984375 0.359375q0.765625 0 1.234375 -0.5q0.46875 -0.5 0.46875 -1.3125l0 -2.9375q0 -0.21875 0.125 -0.34375q0.140625 -0.140625 0.375 -0.140625zm5.1851807 0q0.5 -0.03125 0.5 0.40625q0 0.203125 -0.109375 0.3125q-0.109375 0.109375 -0.375 0.140625l-0.359375 0.03125q-0.796875 0.078125 -1.1875 0.578125q-0.390625 0.484375 -0.390625 1.15625l0 2.921875q0 0.234375 -0.140625 0.359375q-0.125 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -5.0625q0 -0.234375 0.140625 -0.359375q0.140625 -0.125 0.34375 -0.125q0.1875 0 0.3125 0.125q0.140625 0.125 0.140625 0.34375l0 0.671875q0.25 -0.53125 0.71875 -0.796875q0.46875 -0.28125 1.0625 -0.328125l0.171875 -0.015625zm5.861023 4.609375q0.140625 0 0.234375 0.109375q0.09375 0.109375 0.09375 0.28125q0 0.296875 -0.421875 0.546875q-0.4375 0.25 -0.921875 0.375q-0.46875 0.125 -0.921875 0.125q-1.359375 0 -2.15625 -0.796875q-0.78125 -0.8125 -0.78125 -2.21875q0 -0.90625 0.34375 -1.59375q0.359375 -0.6875 0.984375 -1.0625q0.640625 -0.390625 1.4375 -0.390625q1.140625 0 1.8125 0.75q0.671875 0.734375 0.671875 2.0q0 0.25 -0.09375 0.359375q-0.09375 0.109375 -0.3125 0.109375l-3.859375 0q0.09375 2.0625 1.953125 2.0625q0.46875 0 0.796875 -0.125q0.34375 -0.125 0.71875 -0.34375q0.3125 -0.1875 0.421875 -0.1875zm-2.09375 -3.875q-0.765625 0 -1.234375 0.484375q-0.46875 0.484375 -0.546875 1.359375l3.390625 0q-0.015625 -0.890625 -0.4375 -1.359375q-0.421875 -0.484375 -1.171875 -0.484375z" fill-rule="nonzero"/><path fill="#d9ead3" d="m31.874912 252.53609l87.49606 0l0 30.992142l-87.49606 0z" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m31.874912 252.53609l87.49606 0l0 30.992142l-87.49606 0z" fill-rule="evenodd"/><path fill="#000000" d="m67.27695 264.03653q0.21875 0 0.34375 0.140625q0.125 0.125 0.125 0.359375l0 7.578125q0 0.21875 -0.125 0.359375q-0.125 0.140625 -0.34375 0.140625q-0.234375 0 -0.375 -0.140625q-0.125 -0.140625 -0.125 -0.359375l0 -3.4375l-5.062496 0l0 3.4375q0 0.21875 -0.140625 0.359375q-0.125 0.140625 -0.34375 0.140625q-0.234375 0 -0.359375 -0.140625q-0.125 -0.140625 -0.125 -0.359375l0 -7.578125q0 -0.234375 0.125 -0.359375q0.125 -0.140625 0.359375 -0.140625q0.21875 0 0.34375 0.140625q0.140625 0.125 0.140625 0.359375l0 3.296875l5.062496 0l0 -3.296875q0 -0.234375 0.125 -0.359375q0.140625 -0.140625 0.375 -0.140625zm3.0648193 8.515625q-0.234375 0 -0.375 -0.125q-0.125 -0.140625 -0.125 -0.359375l0 -7.5q0 -0.21875 0.125 -0.34375q0.140625 -0.125 0.375 -0.125l2.34375 0q2.03125 0 3.140625 1.09375q1.109375 1.09375 1.109375 3.125q0 2.03125 -1.125 3.140625q-1.109375 1.09375 -3.125 1.09375l-2.34375 0zm2.28125 -0.84375q3.28125 0 3.28125 -3.390625q0 -3.390625 -3.28125 -3.390625l-1.796875 0l0 6.78125l1.796875 0zm6.5711823 0.90625q-0.21875 0 -0.359375 -0.140625q-0.125 -0.140625 -0.125 -0.359375l0 -7.546875q0 -0.21875 0.125 -0.34375q0.140625 -0.125 0.375 -0.125l4.375 0q0.203125 0 0.328125 0.109375q0.125 
0.09375 0.125 0.296875q0 0.203125 -0.125 0.3125q-0.125 0.109375 -0.328125 0.109375l-3.90625 0l0 2.90625l3.65625 0q0.21875 0 0.328125 0.109375q0.125 0.109375 0.125 0.3125q0 0.1875 -0.125 0.296875q-0.109375 0.109375 -0.328125 0.109375l-3.65625 0l0 3.453125q0 0.21875 -0.125 0.359375q-0.125 0.140625 -0.359375 0.140625zm9.0746765 -5.359375q0.8125 0 1.40625 0.34375q0.609375 0.328125 0.9375 0.9375q0.328125 0.59375 0.328125 1.390625q0 0.78125 -0.359375 1.40625q-0.359375 0.625 -1.0 0.96875q-0.640625 0.328125 -1.484375 0.328125q-0.734375 0 -1.453125 -0.25q-0.703125 -0.265625 -1.1875 -0.734375q-0.203125 -0.171875 -0.203125 -0.40625q0 -0.171875 0.09375 -0.296875q0.109375 -0.125 0.234375 -0.125q0.171875 0 0.34375 0.140625q0.515625 0.4375 1.046875 0.640625q0.53125 0.203125 1.109375 0.203125q0.890625 0 1.390625 -0.5q0.5 -0.5 0.5 -1.359375q0 -0.84375 -0.5 -1.359375q-0.5 -0.515625 -1.359375 -0.515625q-1.09375 0 -1.78125 0.84375q-0.15625 0.171875 -0.40625 0.171875q-0.15625 0 -0.28125 -0.09375q-0.109375 -0.109375 -0.109375 -0.296875l0 -4.125q0 -0.21875 0.125 -0.34375q0.125 -0.125 0.359375 -0.125l4.21875 0q0.21875 0 0.34375 0.109375q0.125 0.09375 0.125 0.296875q0 0.1875 -0.125 0.296875q-0.125 0.109375 -0.34375 0.109375l-3.734375 0l0 3.015625q0.34375 -0.328125 0.78125 -0.5q0.453125 -0.171875 0.984375 -0.171875z" fill-rule="nonzero"/><path fill="#d9ead3" d="m190.14 134.76706l87.49608 0l0 30.992126l-87.49608 0z" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m190.14 134.76706l87.49608 0l0 30.992126l-87.49608 0z" fill-rule="evenodd"/><path fill="#000000" d="m215.10997 150.37688q0.1875 0 0.296875 0.109375q0.109375 0.109375 0.109375 0.296875l0 2.984375q0 0.296875 -0.09375 0.4375q-0.078125 0.140625 -0.328125 0.234375q-0.46875 0.203125 -1.15625 0.328125q-0.6875 0.109375 -1.375 0.109375q-1.25 0 -2.171875 -0.515625q-0.90625 -0.515625 -1.390625 -1.484375q-0.484375 -0.96875 -0.484375 -2.328125q0 -1.328125 0.46875 -2.296875q0.484375 -0.984375 1.375 -1.5q0.90625 -0.53125 2.125 -0.53125q0.84375 0 1.5625 0.265625q0.71875 0.25 1.203125 0.734375q0.21875 0.203125 0.21875 0.421875q0 0.171875 -0.109375 0.296875q-0.09375 0.125 -0.234375 0.125q-0.140625 0 -0.328125 -0.140625q-0.625 -0.484375 -1.140625 -0.671875q-0.5 -0.1875 -1.15625 -0.1875q-1.4375 0 -2.203125 0.90625q-0.75 0.890625 -0.75 2.578125q0 1.71875 0.765625 2.609375q0.78125 0.890625 2.28125 0.890625q1.109375 0 2.03125 -0.328125l0 -2.578125l-1.75 0q-0.203125 0 -0.328125 -0.109375q-0.125 -0.109375 -0.125 -0.265625q0 -0.1875 0.125 -0.28125q0.125 -0.109375 0.328125 -0.109375l2.234375 0zm5.1568146 -1.5625q0.5 -0.03125 0.5 0.40625q0 0.203125 -0.109375 0.3125q-0.109375 0.109375 -0.375 0.140625l-0.359375 0.03125q-0.796875 0.078125 -1.1875 0.578125q-0.390625 0.484375 -0.390625 1.15625l0 2.921875q0 0.234375 -0.140625 0.359375q-0.125 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -5.0625q0 -0.234375 0.140625 -0.359375q0.140625 -0.125 0.34375 -0.125q0.1875 0 0.3125 0.125q0.140625 0.125 0.140625 0.34375l0 0.671875q0.25 -0.53125 0.71875 -0.796875q0.46875 -0.28125 1.0625 -0.328125l0.171875 -0.015625zm3.720398 -0.015625q2.203125 0 2.203125 2.296875l0 3.265625q0 0.21875 -0.125 0.359375q-0.125 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.125 -0.140625 -0.125 -0.359375l0 -0.578125q-0.21875 0.515625 -0.6875 0.796875q-0.46875 0.28125 -1.078125 0.28125q-0.5625 0 -1.046875 -0.21875q-0.46875 -0.234375 -0.75 -0.640625q-0.265625 -0.40625 -0.265625 -0.90625q0 -0.65625 0.328125 -1.015625q0.34375 
-0.375 1.109375 -0.53125q0.765625 -0.15625 2.125 -0.15625l0.265625 0l0 -0.40625q0 -0.71875 -0.296875 -1.046875q-0.28125 -0.34375 -0.953125 -0.34375q-0.8125 0 -1.65625 0.453125q-0.3125 0.203125 -0.453125 0.203125q-0.140625 0 -0.234375 -0.109375q-0.09375 -0.109375 -0.09375 -0.28125q0 -0.171875 0.09375 -0.296875q0.109375 -0.125 0.328125 -0.25q0.421875 -0.25 0.953125 -0.375q0.546875 -0.140625 1.0625 -0.140625zm-0.390625 5.296875q0.71875 0 1.171875 -0.484375q0.46875 -0.484375 0.46875 -1.25l0 -0.34375l-0.21875 0q-1.046875 0 -1.609375 0.09375q-0.546875 0.078125 -0.78125 0.296875q-0.234375 0.203125 -0.234375 0.609375q0 0.46875 0.34375 0.78125q0.34375 0.296875 0.859375 0.296875zm7.3131714 -5.296875q0.765625 0 1.34375 0.390625q0.59375 0.375 0.921875 1.0625q0.328125 0.6875 0.328125 1.609375q0 0.90625 -0.328125 1.59375q-0.328125 0.671875 -0.90625 1.046875q-0.578125 0.359375 -1.359375 0.359375q-0.6875 0 -1.203125 -0.296875q-0.5 -0.296875 -0.765625 -0.84375l0 2.8125q0 0.21875 -0.125 0.34375q-0.125 0.125 -0.359375 0.125q-0.234375 0 -0.359375 -0.140625q-0.125 -0.125 -0.125 -0.328125l0 -7.234375q0 -0.21875 0.125 -0.34375q0.125 -0.140625 0.359375 -0.140625q0.234375 0 0.359375 0.140625q0.125 0.125 0.125 0.34375l0 0.640625q0.265625 -0.546875 0.765625 -0.84375q0.515625 -0.296875 1.203125 -0.296875zm-0.203125 5.265625q0.859375 0 1.328125 -0.578125q0.46875 -0.578125 0.46875 -1.625q0 -1.0625 -0.46875 -1.65625q-0.46875 -0.59375 -1.328125 -0.59375q-0.84375 0 -1.3125 0.578125q-0.453125 0.578125 -0.453125 1.640625q0 1.0625 0.453125 1.65625q0.46875 0.578125 1.3125 0.578125zm7.2028046 -5.265625q1.03125 0 1.546875 0.578125q0.53125 0.578125 0.53125 1.734375l0 3.25q0 0.234375 -0.140625 0.359375q-0.125 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -3.21875q0 -0.78125 -0.328125 -1.15625q-0.3125 -0.375 -1.0 -0.375q-0.8125 0 -1.296875 0.5q-0.46875 0.484375 -0.46875 1.328125l0 2.921875q0 0.234375 -0.125 0.359375q-0.125 0.125 -0.359375 0.125q-0.234375 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -7.625q0 -0.203125 0.125 -0.328125q0.140625 -0.140625 0.359375 -0.140625q0.234375 0 0.359375 0.125q0.125 0.125 0.125 0.34375l0 3.140625q0.28125 -0.53125 0.796875 -0.796875q0.515625 -0.28125 1.1875 -0.28125zm4.5035553 5.984375q-0.234375 0 -0.375 -0.125q-0.125 -0.140625 -0.125 -0.359375l0 -7.5q0 -0.21875 0.125 -0.34375q0.140625 -0.125 0.375 -0.125l2.34375 0q2.03125 0 3.140625 1.09375q1.109375 1.09375 1.109375 3.125q0 2.03125 -1.125 3.140625q-1.109375 1.09375 -3.125 1.09375l-2.34375 0zm2.28125 -0.84375q3.28125 0 3.28125 -3.390625q0 -3.390625 -3.28125 -3.390625l-1.796875 0l0 6.78125l1.796875 0zm10.461807 -0.515625q0.140625 0 0.234375 0.109375q0.09375 0.109375 0.09375 0.28125q0 0.296875 -0.421875 0.546875q-0.4375 0.25 -0.921875 0.375q-0.46875 0.125 -0.921875 0.125q-1.359375 0 -2.15625 -0.796875q-0.78125 -0.8125 -0.78125 -2.21875q0 -0.90625 0.34375 -1.59375q0.359375 -0.6875 0.984375 -1.0625q0.640625 -0.390625 1.4375 -0.390625q1.140625 0 1.8125 0.75q0.671875 0.734375 0.671875 2.0q0 0.25 -0.09375 0.359375q-0.09375 0.109375 -0.3125 0.109375l-3.859375 0q0.09375 2.0625 1.953125 2.0625q0.46875 0 0.796875 -0.125q0.34375 -0.125 0.71875 -0.34375q0.3125 -0.1875 0.421875 -0.1875zm-2.09375 -3.875q-0.765625 0 -1.234375 0.484375q-0.46875 0.484375 -0.546875 1.359375l3.390625 0q-0.015625 -0.890625 -0.4375 -1.359375q-0.421875 -0.484375 -1.171875 -0.484375zm6.480301 -2.453125q-0.640625 0.046875 -0.96875 0.40625q-0.3125 0.34375 -0.3125 1.046875l0 0.390625l1.328125 0q0.203125 0 0.3125 0.109375q0.109375 0.109375 0.109375 
0.28125q0 0.1875 -0.109375 0.28125q-0.109375 0.09375 -0.3125 0.09375l-1.328125 0l0 4.65625q0 0.234375 -0.140625 0.359375q-0.140625 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.140625 -0.125 -0.140625 -0.359375l0 -4.65625l-0.796875 0q-0.203125 0 -0.328125 -0.09375q-0.109375 -0.109375 -0.109375 -0.28125q0 -0.171875 0.109375 -0.28125q0.125 -0.109375 0.328125 -0.109375l0.796875 0l0 -0.21875q0 -1.078125 0.53125 -1.6875q0.546875 -0.625 1.5625 -0.703125l0.3125 -0.015625q0.3125 -0.03125 0.453125 0.0625q0.140625 0.078125 0.140625 0.296875q0 0.34375 -0.421875 0.390625l-0.3125 0.03125z" fill-rule="nonzero"/><path fill="#d9ead3" d="m233.1085 252.53609l87.49608 0l0 30.992142l-87.49608 0z" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m233.1085 252.53609l87.49608 0l0 30.992142l-87.49608 0z" fill-rule="evenodd"/><path fill="#000000" d="m260.00964 265.61465q-0.21875 0 -0.359375 -0.140625q-0.125 -0.140625 -0.125 -0.359375l0 -7.546875q0 -0.21875 0.125 -0.34375q0.140625 -0.125 0.375 -0.125l4.375 0q0.203125 0 0.328125 0.109375q0.125 0.09375 0.125 0.296875q0 0.203125 -0.125 0.3125q-0.125 0.109375 -0.328125 0.109375l-3.90625 0l0 2.90625l3.65625 0q0.21875 0 0.328125 0.109375q0.125 0.109375 0.125 0.3125q0 0.1875 -0.125 0.296875q-0.109375 0.109375 -0.328125 0.109375l-3.65625 0l0 3.453125q0 0.21875 -0.125 0.359375q-0.125 0.140625 -0.359375 0.140625zm8.9496765 -6.03125q0.5 -0.03125 0.5 0.40625q0 0.203125 -0.109375 0.3125q-0.109375 0.109375 -0.375 0.140625l-0.359375 0.03125q-0.796875 0.078125 -1.1875 0.578125q-0.390625 0.484375 -0.390625 1.15625l0 2.921875q0 0.234375 -0.140625 0.359375q-0.125 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -5.0625q0 -0.234375 0.140625 -0.359375q0.140625 -0.125 0.34375 -0.125q0.1875 0 0.3125 0.125q0.140625 0.125 0.140625 0.34375l0 0.671875q0.25 -0.53125 0.71875 -0.796875q0.46875 -0.28125 1.0625 -0.328125l0.171875 -0.015625zm3.767273 6.046875q-0.828125 0 -1.46875 -0.359375q-0.625 -0.375 -0.96875 -1.0625q-0.34375 -0.703125 -0.34375 -1.609375q0 -0.90625 0.34375 -1.59375q0.34375 -0.703125 0.96875 -1.0625q0.640625 -0.375 1.46875 -0.375q0.828125 0 1.453125 0.375q0.640625 0.359375 0.984375 1.0625q0.34375 0.6875 0.34375 1.59375q0 0.90625 -0.34375 1.609375q-0.34375 0.6875 -0.984375 1.0625q-0.625 0.359375 -1.453125 0.359375zm0 -0.796875q0.859375 0 1.3125 -0.5625q0.46875 -0.578125 0.46875 -1.671875q0 -1.0625 -0.46875 -1.640625q-0.46875 -0.59375 -1.3125 -0.59375q-0.859375 0 -1.328125 0.59375q-0.46875 0.578125 -0.46875 1.640625q0 1.078125 0.453125 1.65625q0.46875 0.578125 1.34375 0.578125zm8.535065 -0.046875q0.203125 0 0.296875 0.109375q0.109375 0.09375 0.109375 0.265625q0 0.1875 -0.109375 0.296875q-0.09375 0.09375 -0.296875 0.09375l-4.203125 0q-0.203125 0 -0.34375 -0.125q-0.125 -0.125 -0.125 -0.3125q0 -0.1875 0.140625 -0.359375l3.546875 -4.28125l-3.28125 0q-0.203125 0 -0.328125 -0.09375q-0.109375 -0.109375 -0.109375 -0.28125q0 -0.171875 0.109375 -0.28125q0.125 -0.109375 0.328125 -0.109375l4.0625 0q0.21875 0 0.34375 0.125q0.140625 0.125 0.140625 0.3125q0 0.1875 -0.140625 0.359375l-3.5625 4.28125l3.421875 0zm6.2547913 -0.59375q0.140625 0 0.234375 0.109375q0.09375 0.109375 0.09375 0.28125q0 0.296875 -0.421875 0.546875q-0.4375 0.25 -0.921875 0.375q-0.46875 0.125 -0.921875 0.125q-1.359375 0 -2.15625 -0.796875q-0.78125 -0.8125 -0.78125 -2.21875q0 -0.90625 0.34375 -1.59375q0.359375 -0.6875 0.984375 -1.0625q0.640625 -0.390625 1.4375 -0.390625q1.140625 0 1.8125 0.75q0.671875 0.734375 0.671875 2.0q0 
[SVG path data omitted: remainder of the previous diagram file (vector glyph and shape coordinates). The old file ends with </g></svg> with no newline at end of file.]
+[SVG path data omitted: start of the replacement diagram file, an SVG with viewBox "0.0 0.0 720.0 540.0" containing boxed labels and connecting arrows.]
0.375q-0.46875 0.125 -0.921875 0.125q-1.359375 0 -2.15625 -0.796875q-0.78125 -0.8125 -0.78125 -2.21875q0 -0.90625 0.34375 -1.59375q0.359375 -0.6875 0.984375 -1.0625q0.640625 -0.390625 1.4375 -0.390625q1.140625 0 1.8125 0.75q0.671875 0.734375 0.671875 2.0q0 0.25 -0.09375 0.359375q-0.09375 0.109375 -0.3125 0.109375l-3.859375 0q0.09375 2.0625 1.953125 2.0625q0.46875 0 0.796875 -0.125q0.34375 -0.125 0.71875 -0.34375q0.3125 -0.1875 0.421875 -0.1875zm-2.09375 -3.875q-0.765625 0 -1.234375 0.484375q-0.46875 0.484375 -0.546875 1.359375l3.390625 0q-0.015625 -0.890625 -0.4375 -1.359375q-0.421875 -0.484375 -1.171875 -0.484375zm8.199051 4.46875q0.203125 0 0.296875 0.109375q0.109375 0.09375 0.109375 0.265625q0 0.1875 -0.109375 0.296875q-0.09375 0.09375 -0.296875 0.09375l-4.203125 0q-0.203125 0 -0.34375 -0.125q-0.125 -0.125 -0.125 -0.3125q0 -0.1875 0.140625 -0.359375l3.546875 -4.28125l-3.28125 0q-0.203125 0 -0.328125 -0.09375q-0.109375 -0.109375 -0.109375 -0.28125q0 -0.171875 0.109375 -0.28125q0.125 -0.109375 0.328125 -0.109375l4.0625 0q0.21875 0 0.34375 0.125q0.140625 0.125 0.140625 0.3125q0 0.1875 -0.140625 0.359375l-3.5625 4.28125l3.421875 0zm6.2547913 -0.59375q0.140625 0 0.234375 0.109375q0.09375 0.109375 0.09375 0.28125q0 0.296875 -0.421875 0.546875q-0.4375 0.25 -0.921875 0.375q-0.46875 0.125 -0.921875 0.125q-1.359375 0 -2.15625 -0.796875q-0.78125 -0.8125 -0.78125 -2.21875q0 -0.90625 0.34375 -1.59375q0.359375 -0.6875 0.984375 -1.0625q0.640625 -0.390625 1.4375 -0.390625q1.140625 0 1.8125 0.75q0.671875 0.734375 0.671875 2.0q0 0.25 -0.09375 0.359375q-0.09375 0.109375 -0.3125 0.109375l-3.859375 0q0.09375 2.0625 1.953125 2.0625q0.46875 0 0.796875 -0.125q0.34375 -0.125 0.71875 -0.34375q0.3125 -0.1875 0.421875 -0.1875zm-2.09375 -3.875q-0.765625 0 -1.234375 0.484375q-0.46875 0.484375 -0.546875 1.359375l3.390625 0q-0.015625 -0.890625 -0.4375 -1.359375q-0.421875 -0.484375 -1.171875 -0.484375zm3.3865662 5.875q-0.171875 0 -0.28125 -0.09375q-0.109375 -0.09375 -0.109375 -0.21875q0 -0.140625 0.109375 -0.234375q0.109375 -0.09375 0.28125 -0.09375l5.21875 0q0.171875 0 0.28125 0.09375q0.109375 0.09375 0.109375 0.234375q0 0.125 -0.109375 0.21875q-0.109375 0.09375 -0.28125 0.09375l-5.21875 0zm11.2500305 -6.609375q0.234375 0 0.359375 0.140625q0.125 0.125 0.125 0.34375l0 5.09375q0 1.296875 -0.671875 1.96875q-0.671875 0.671875 -1.984375 0.671875q-1.28125 0 -2.140625 -0.515625q-0.421875 -0.234375 -0.421875 -0.546875q0 -0.171875 0.078125 -0.28125q0.09375 -0.109375 0.234375 -0.109375q0.125 0 0.4375 0.171875q0.421875 0.21875 0.828125 0.34375q0.40625 0.140625 0.96875 0.140625q0.859375 0 1.28125 -0.453125q0.4375 -0.453125 0.4375 -1.3125l0 -1.03125q-0.25 0.5625 -0.78125 0.859375q-0.515625 0.296875 -1.21875 0.296875q-0.765625 0 -1.359375 -0.359375q-0.59375 -0.359375 -0.9375 -1.015625q-0.328125 -0.65625 -0.328125 -1.515625q0 -0.875 0.328125 -1.53125q0.34375 -0.65625 0.9375 -1.015625q0.59375 -0.359375 1.359375 -0.359375q0.6875 0 1.203125 0.296875q0.515625 0.296875 0.78125 0.84375l0 -0.640625q0 -0.21875 0.125 -0.34375q0.125 -0.140625 0.359375 -0.140625zm-2.28125 4.984375q0.84375 0 1.3125 -0.546875q0.484375 -0.5625 0.484375 -1.546875q0 -0.984375 -0.46875 -1.53125q-0.46875 -0.5625 -1.328125 -0.5625q-0.84375 0 -1.34375 0.5625q-0.484375 0.546875 -0.484375 1.53125q0 0.984375 0.484375 1.546875q0.5 0.546875 1.34375 0.546875zm7.4695435 -4.984375q0.5 -0.03125 0.5 0.40625q0 0.203125 -0.109375 0.3125q-0.109375 0.109375 -0.375 0.140625l-0.359375 0.03125q-0.796875 0.078125 -1.1875 0.578125q-0.390625 0.484375 -0.390625 1.15625l0 2.921875q0 
0.234375 -0.140625 0.359375q-0.125 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -5.0625q0 -0.234375 0.140625 -0.359375q0.140625 -0.125 0.34375 -0.125q0.1875 0 0.3125 0.125q0.140625 0.125 0.140625 0.34375l0 0.671875q0.25 -0.53125 0.71875 -0.796875q0.46875 -0.28125 1.0625 -0.328125l0.171875 -0.015625zm3.720398 -0.015625q2.203125 0 2.203125 2.296875l0 3.265625q0 0.21875 -0.125 0.359375q-0.125 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.125 -0.140625 -0.125 -0.359375l0 -0.578125q-0.21875 0.515625 -0.6875 0.796875q-0.46875 0.28125 -1.078125 0.28125q-0.5625 0 -1.046875 -0.21875q-0.46875 -0.234375 -0.75 -0.640625q-0.265625 -0.40625 -0.265625 -0.90625q0 -0.65625 0.328125 -1.015625q0.34375 -0.375 1.109375 -0.53125q0.765625 -0.15625 2.125 -0.15625l0.265625 0l0 -0.40625q0 -0.71875 -0.296875 -1.046875q-0.28125 -0.34375 -0.953125 -0.34375q-0.8125 0 -1.65625 0.453125q-0.3125 0.203125 -0.453125 0.203125q-0.140625 0 -0.234375 -0.109375q-0.09375 -0.109375 -0.09375 -0.28125q0 -0.171875 0.09375 -0.296875q0.109375 -0.125 0.328125 -0.25q0.421875 -0.25 0.953125 -0.375q0.546875 -0.140625 1.0625 -0.140625zm-0.390625 5.296875q0.71875 0 1.171875 -0.484375q0.46875 -0.484375 0.46875 -1.25l0 -0.34375l-0.21875 0q-1.046875 0 -1.609375 0.09375q-0.546875 0.078125 -0.78125 0.296875q-0.234375 0.203125 -0.234375 0.609375q0 0.46875 0.34375 0.78125q0.34375 0.296875 0.859375 0.296875zm7.3131714 -5.296875q0.765625 0 1.34375 0.390625q0.59375 0.375 0.921875 1.0625q0.328125 0.6875 0.328125 1.609375q0 0.90625 -0.328125 1.59375q-0.328125 0.671875 -0.90625 1.046875q-0.578125 0.359375 -1.359375 0.359375q-0.6875 0 -1.203125 -0.296875q-0.5 -0.296875 -0.765625 -0.84375l0 2.8125q0 0.21875 -0.125 0.34375q-0.125 0.125 -0.359375 0.125q-0.234375 0 -0.359375 -0.140625q-0.125 -0.125 -0.125 -0.328125l0 -7.234375q0 -0.21875 0.125 -0.34375q0.125 -0.140625 0.359375 -0.140625q0.234375 0 0.359375 0.140625q0.125 0.125 0.125 0.34375l0 0.640625q0.265625 -0.546875 0.765625 -0.84375q0.515625 -0.296875 1.203125 -0.296875zm-0.203125 5.265625q0.859375 0 1.328125 -0.578125q0.46875 -0.578125 0.46875 -1.625q0 -1.0625 -0.46875 -1.65625q-0.46875 -0.59375 -1.328125 -0.59375q-0.84375 0 -1.3125 0.578125q-0.453125 0.578125 -0.453125 1.640625q0 1.0625 0.453125 1.65625q0.46875 0.578125 1.3125 0.578125zm7.20282 -5.265625q1.03125 0 1.546875 0.578125q0.53125 0.578125 0.53125 1.734375l0 3.25q0 0.234375 -0.140625 0.359375q-0.125 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -3.21875q0 -0.78125 -0.328125 -1.15625q-0.3125 -0.375 -1.0 -0.375q-0.8125 0 -1.296875 0.5q-0.46875 0.484375 -0.46875 1.328125l0 2.921875q0 0.234375 -0.125 0.359375q-0.125 0.125 -0.359375 0.125q-0.234375 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -7.625q0 -0.203125 0.125 -0.328125q0.140625 -0.140625 0.359375 -0.140625q0.234375 0 0.359375 0.125q0.125 0.125 0.125 0.34375l0 3.140625q0.28125 -0.53125 0.796875 -0.796875q0.515625 -0.28125 1.1875 -0.28125zm4.331665 6.046875q-0.28125 0 -0.484375 -0.1875q-0.1875 -0.1875 -0.1875 -0.484375q0 -0.296875 0.1875 -0.484375q0.203125 -0.203125 0.484375 -0.203125q0.28125 0 0.46875 0.203125q0.1875 0.1875 0.1875 0.484375q0 0.296875 -0.1875 0.484375q-0.1875 0.1875 -0.46875 0.1875zm5.2167664 -6.046875q0.765625 0 1.34375 0.390625q0.59375 0.375 0.921875 1.0625q0.328125 0.6875 0.328125 1.609375q0 0.90625 -0.328125 1.59375q-0.328125 0.671875 -0.90625 1.046875q-0.578125 0.359375 -1.359375 0.359375q-0.6875 0 -1.203125 -0.296875q-0.5 -0.296875 -0.765625 -0.84375l0 2.8125q0 0.21875 -0.125 
0.34375q-0.125 0.125 -0.359375 0.125q-0.234375 0 -0.359375 -0.140625q-0.125 -0.125 -0.125 -0.328125l0 -7.234375q0 -0.21875 0.125 -0.34375q0.125 -0.140625 0.359375 -0.140625q0.234375 0 0.359375 0.140625q0.125 0.125 0.125 0.34375l0 0.640625q0.265625 -0.546875 0.765625 -0.84375q0.515625 -0.296875 1.203125 -0.296875zm-0.203125 5.265625q0.859375 0 1.328125 -0.578125q0.46875 -0.578125 0.46875 -1.625q0 -1.0625 -0.46875 -1.65625q-0.46875 -0.59375 -1.328125 -0.59375q-0.84375 0 -1.3125 0.578125q-0.453125 0.578125 -0.453125 1.640625q0 1.0625 0.453125 1.65625q0.46875 0.578125 1.3125 0.578125zm8.45282 -4.9375q0.140625 -0.296875 0.421875 -0.296875q0.1875 0 0.328125 0.125q0.140625 0.109375 0.140625 0.296875q0 0.109375 -0.046875 0.1875l-3.375 7.28125q-0.0625 0.125 -0.171875 0.1875q-0.109375 0.078125 -0.234375 0.078125q-0.1875 0 -0.328125 -0.109375q-0.125 -0.109375 -0.125 -0.296875q0 -0.09375 0.046875 -0.1875l0.84375 -1.8125l-2.375 -5.140625q-0.046875 -0.078125 -0.046875 -0.171875q0 -0.1875 0.15625 -0.3125q0.15625 -0.140625 0.359375 -0.140625q0.109375 0 0.21875 0.078125q0.125 0.078125 0.1875 0.203125l2.0 4.5l2.0 -4.46875z" fill-rule="nonzero"/><path fill="#f4cccc" d="m132.49081 319.42978l87.49606 0l0 30.992126l-87.49606 0z" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m132.49081 319.42978l87.49606 0l0 30.992126l-87.49606 0z" fill-rule="evenodd"/><path fill="#000000" d="m163.01448 339.50836q-0.234375 0 -0.375 -0.140625q-0.140625 -0.140625 -0.140625 -0.359375l0 -7.1875l-2.578125 0q-0.21875 0 -0.34375 -0.109375q-0.109375 -0.109375 -0.109375 -0.3125q0 -0.203125 0.109375 -0.296875q0.125 -0.109375 0.34375 -0.109375l6.15625 0q0.21875 0 0.328125 0.109375q0.125 0.09375 0.125 0.296875q0 0.203125 -0.125 0.3125q-0.109375 0.109375 -0.328125 0.109375l-2.578125 0l0 7.1875q0 0.21875 -0.140625 0.359375q-0.125 0.140625 -0.34375 0.140625zm8.160431 0.03125q-1.171875 0 -2.046875 -0.515625q-0.859375 -0.53125 -1.328125 -1.5q-0.46875 -0.984375 -0.46875 -2.296875q0 -1.34375 0.453125 -2.3125q0.46875 -0.984375 1.328125 -1.5q0.875 -0.53125 2.0625 -0.53125q1.1875 0 2.0625 0.53125q0.875 0.515625 1.328125 1.5q0.46875 0.96875 0.46875 2.296875q0 1.3125 -0.46875 2.296875q-0.46875 0.984375 -1.34375 1.515625q-0.859375 0.515625 -2.046875 0.515625zm0 -0.84375q1.34375 0 2.09375 -0.90625q0.75 -0.90625 0.75 -2.578125q0 -1.6875 -0.75 -2.578125q-0.734375 -0.90625 -2.09375 -0.90625q-1.34375 0 -2.09375 0.90625q-0.75 0.90625 -0.75 2.578125q0 1.671875 0.75 2.578125q0.75 0.90625 2.09375 0.90625zm9.214935 0.84375q-1.1875 0 -2.0625 -0.515625q-0.875 -0.53125 -1.359375 -1.5q-0.46875 -0.984375 -0.46875 -2.3125q0 -1.328125 0.46875 -2.296875q0.484375 -0.984375 1.359375 -1.5q0.875 -0.53125 2.0625 -0.53125q0.8125 0 1.515625 0.265625q0.71875 0.25 1.25 0.734375q0.1875 0.1875 0.1875 0.421875q0 0.171875 -0.09375 0.296875q-0.09375 0.125 -0.21875 0.125q-0.15625 0 -0.359375 -0.140625q-0.609375 -0.46875 -1.109375 -0.65625q-0.5 -0.203125 -1.140625 -0.203125q-1.390625 0 -2.140625 0.90625q-0.75 0.90625 -0.75 2.578125q0 1.671875 0.75 2.578125q0.75 0.90625 2.140625 0.90625q0.640625 0 1.140625 -0.1875q0.5 -0.1875 1.109375 -0.671875q0.203125 -0.125 0.359375 -0.125q0.125 0 0.21875 0.125q0.09375 0.109375 0.09375 0.296875q0 0.234375 -0.1875 0.40625q-0.53125 0.484375 -1.25 0.75q-0.703125 0.25 -1.515625 0.25zm8.077179 0q-1.171875 0 -2.046875 -0.515625q-0.859375 -0.53125 -1.328125 -1.5q-0.46875 -0.984375 -0.46875 -2.296875q0 -1.34375 0.453125 -2.3125q0.46875 -0.984375 1.328125 -1.5q0.875 -0.53125 2.0625 
-0.53125q1.1875 0 2.0625 0.53125q0.875 0.515625 1.328125 1.5q0.46875 0.96875 0.46875 2.296875q0 1.3125 -0.46875 2.296875q-0.46875 0.984375 -1.34375 1.515625q-0.859375 0.515625 -2.046875 0.515625zm0 -0.84375q1.34375 0 2.09375 -0.90625q0.75 -0.90625 0.75 -2.578125q0 -1.6875 -0.75 -2.578125q-0.734375 -0.90625 -2.09375 -0.90625q-1.34375 0 -2.09375 0.90625q-0.75 0.90625 -0.75 2.578125q0 1.671875 0.75 2.578125q0.75 0.90625 2.09375 0.90625z" fill-rule="nonzero"/><path fill="#d9ead3" d="m284.12296 319.3983l87.49606 0l0 30.992126l-87.49606 0z" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m284.12296 319.3983l87.49606 0l0 30.992126l-87.49606 0z" fill-rule="evenodd"/><path fill="#000000" d="m314.7006 332.47687q-0.234375 0 -0.375 -0.140625q-0.140625 -0.140625 -0.140625 -0.359375l0 -7.1875l-2.578125 0q-0.21875 0 -0.34375 -0.109375q-0.109375 -0.109375 -0.109375 -0.3125q0 -0.203125 0.109375 -0.296875q0.125 -0.109375 0.34375 -0.109375l6.15625 0q0.21875 0 0.328125 0.109375q0.125 0.09375 0.125 0.296875q0 0.203125 -0.125 0.3125q-0.109375 0.109375 -0.328125 0.109375l-2.578125 0l0 7.1875q0 0.21875 -0.140625 0.359375q-0.125 0.140625 -0.34375 0.140625zm5.113556 0q-0.21875 0 -0.359375 -0.140625q-0.125 -0.140625 -0.125 -0.359375l0 -7.546875q0 -0.21875 0.125 -0.34375q0.140625 -0.125 0.375 -0.125l4.375 0q0.203125 0 0.328125 0.109375q0.125 0.09375 0.125 0.296875q0 0.203125 -0.125 0.3125q-0.125 0.109375 -0.328125 0.109375l-3.90625 0l0 2.90625l3.65625 0q0.21875 0 0.328125 0.109375q0.125 0.109375 0.125 0.3125q0 0.1875 -0.125 0.296875q-0.109375 0.109375 -0.328125 0.109375l-3.65625 0l0 3.453125q0 0.21875 -0.125 0.359375q-0.125 0.140625 -0.359375 0.140625zm6.6840515 -0.0625q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.328125l0 -7.5625q0 -0.234375 0.125 -0.359375q0.140625 -0.140625 0.359375 -0.140625q0.234375 0 0.359375 0.140625q0.140625 0.125 0.140625 0.359375l0 7.171875l3.875 0q0.21875 0 0.328125 0.109375q0.125 0.109375 0.125 0.3125q0 0.203125 -0.125 0.3125q-0.109375 0.109375 -0.328125 0.109375l-4.375 0zm6.3394165 0.0625q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -5.0625q0 -0.234375 0.125 -0.359375q0.140625 -0.125 0.359375 -0.125q0.21875 0 0.34375 0.125q0.140625 0.125 0.140625 0.359375l0 5.0625q0 0.234375 -0.140625 0.359375q-0.125 0.125 -0.34375 0.125zm0 -7.28125q-0.296875 0 -0.484375 -0.171875q-0.171875 -0.171875 -0.171875 -0.453125q0 -0.25 0.171875 -0.421875q0.1875 -0.171875 0.484375 -0.171875q0.28125 0 0.453125 0.171875q0.1875 0.171875 0.1875 0.421875q0 0.28125 -0.1875 0.453125q-0.171875 0.171875 -0.453125 0.171875zm4.987152 6.515625q0.421875 0.03125 0.421875 0.375q0 0.203125 -0.15625 0.3125q-0.140625 0.09375 -0.4375 0.078125l-0.328125 -0.03125q-0.953125 -0.0625 -1.421875 -0.5625q-0.453125 -0.515625 -0.453125 -1.53125l0 -3.015625l-0.796875 0q-0.203125 0 -0.328125 -0.09375q-0.109375 -0.109375 -0.109375 -0.28125q0 -0.171875 0.109375 -0.28125q0.125 -0.109375 0.328125 -0.109375l0.796875 0l0 -1.359375q0 -0.21875 0.125 -0.34375q0.140625 -0.140625 0.375 -0.140625q0.21875 0 0.34375 0.140625q0.140625 0.125 0.140625 0.34375l0 1.359375l1.328125 0q0.1875 0 0.296875 0.109375q0.125 0.109375 0.125 0.28125q0 0.171875 -0.125 0.28125q-0.109375 0.09375 -0.296875 0.09375l-1.328125 0l0 3.0625q0 0.65625 0.265625 0.953125q0.265625 0.296875 0.8125 0.328125l0.3125 0.03125zm5.9081726 -0.65625q0.140625 0 0.234375 0.109375q0.09375 0.109375 0.09375 0.28125q0 0.296875 -0.421875 0.546875q-0.4375 0.25 -0.921875 0.375q-0.46875 0.125 -0.921875 0.125q-1.359375 0 
-2.15625 -0.796875q-0.78125 -0.8125 -0.78125 -2.21875q0 -0.90625 0.34375 -1.59375q0.359375 -0.6875 0.984375 -1.0625q0.640625 -0.390625 1.4375 -0.390625q1.140625 0 1.8125 0.75q0.671875 0.734375 0.671875 2.0q0 0.25 -0.09375 0.359375q-0.09375 0.109375 -0.3125 0.109375l-3.859375 0q0.09375 2.0625 1.953125 2.0625q0.46875 0 0.796875 -0.125q0.34375 -0.125 0.71875 -0.34375q0.3125 -0.1875 0.421875 -0.1875zm-2.09375 -3.875q-0.765625 0 -1.234375 0.484375q-0.46875 0.484375 -0.546875 1.359375l3.390625 0q-0.015625 -0.890625 -0.4375 -1.359375q-0.421875 -0.484375 -1.171875 -0.484375z" fill-rule="nonzero"/><path fill="#000000" d="m303.37402 346.47687q-0.21875 0 -0.359375 -0.140625q-0.125 -0.140625 -0.125 -0.359375l0 -7.546875q0 -0.21875 0.125 -0.34375q0.140625 -0.125 0.375 -0.125l4.375 0q0.203125 0 0.328125 0.109375q0.125 0.09375 0.125 0.296875q0 0.203125 -0.125 0.3125q-0.125 0.109375 -0.328125 0.109375l-3.90625 0l0 2.90625l3.65625 0q0.21875 0 0.328125 0.109375q0.125 0.109375 0.125 0.3125q0 0.1875 -0.125 0.296875q-0.109375 0.109375 -0.328125 0.109375l-3.65625 0l0 3.453125q0 0.21875 -0.125 0.359375q-0.125 0.140625 -0.359375 0.140625zm6.5434265 0q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -7.625q0 -0.21875 0.125 -0.34375q0.140625 -0.125 0.359375 -0.125q0.203125 0 0.34375 0.125q0.140625 0.125 0.140625 0.34375l0 7.625q0 0.234375 -0.140625 0.359375q-0.140625 0.125 -0.34375 0.125zm4.674652 -6.046875q2.203125 0 2.203125 2.296875l0 3.265625q0 0.21875 -0.125 0.359375q-0.125 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.125 -0.140625 -0.125 -0.359375l0 -0.578125q-0.21875 0.515625 -0.6875 0.796875q-0.46875 0.28125 -1.078125 0.28125q-0.5625 0 -1.046875 -0.21875q-0.46875 -0.234375 -0.75 -0.640625q-0.265625 -0.40625 -0.265625 -0.90625q0 -0.65625 0.328125 -1.015625q0.34375 -0.375 1.109375 -0.53125q0.765625 -0.15625 2.125 -0.15625l0.265625 0l0 -0.40625q0 -0.71875 -0.296875 -1.046875q-0.28125 -0.34375 -0.953125 -0.34375q-0.8125 0 -1.65625 0.453125q-0.3125 0.203125 -0.453125 0.203125q-0.140625 0 -0.234375 -0.109375q-0.09375 -0.109375 -0.09375 -0.28125q0 -0.171875 0.09375 -0.296875q0.109375 -0.125 0.328125 -0.25q0.421875 -0.25 0.953125 -0.375q0.546875 -0.140625 1.0625 -0.140625zm-0.390625 5.296875q0.71875 0 1.171875 -0.484375q0.46875 -0.484375 0.46875 -1.25l0 -0.34375l-0.21875 0q-1.046875 0 -1.609375 0.09375q-0.546875 0.078125 -0.78125 0.296875q-0.234375 0.203125 -0.234375 0.609375q0 0.46875 0.34375 0.78125q0.34375 0.296875 0.859375 0.296875zm7.0631714 -0.015625q0.421875 0.03125 0.421875 0.375q0 0.203125 -0.15625 0.3125q-0.140625 0.09375 -0.4375 0.078125l-0.328125 -0.03125q-0.953125 -0.0625 -1.421875 -0.5625q-0.453125 -0.515625 -0.453125 -1.53125l0 -3.015625l-0.796875 0q-0.203125 0 -0.328125 -0.09375q-0.109375 -0.109375 -0.109375 -0.28125q0 -0.171875 0.109375 -0.28125q0.125 -0.109375 0.328125 -0.109375l0.796875 0l0 -1.359375q0 -0.21875 0.125 -0.34375q0.140625 -0.140625 0.375 -0.140625q0.21875 0 0.34375 0.140625q0.140625 0.125 0.140625 0.34375l0 1.359375l1.328125 0q0.1875 0 0.296875 0.109375q0.125 0.109375 0.125 0.28125q0 0.171875 -0.125 0.28125q-0.109375 0.09375 -0.296875 0.09375l-1.328125 0l0 3.0625q0 0.65625 0.265625 0.953125q0.265625 0.296875 0.8125 0.328125l0.3125 0.03125zm4.3300476 -5.28125q0.765625 0 1.34375 0.375q0.59375 0.359375 0.921875 1.046875q0.328125 0.6875 0.328125 1.59375q0 0.90625 -0.328125 1.59375q-0.328125 0.6875 -0.921875 1.078125q-0.578125 0.375 -1.34375 0.375q-0.6875 0 -1.203125 -0.296875q-0.5 -0.296875 -0.765625 -0.84375l0 0.640625q0 0.21875 -0.125 0.359375q-0.125 0.125 -0.359375 
0.125q-0.234375 0 -0.359375 -0.125q-0.125 -0.140625 -0.125 -0.359375l0 -7.625q0 -0.203125 0.125 -0.328125q0.125 -0.140625 0.359375 -0.140625q0.234375 0 0.359375 0.125q0.125 0.125 0.125 0.34375l0 3.203125q0.265625 -0.546875 0.765625 -0.84375q0.515625 -0.296875 1.203125 -0.296875zm-0.203125 5.265625q0.859375 0 1.328125 -0.59375q0.46875 -0.59375 0.46875 -1.65625q0 -1.046875 -0.46875 -1.625q-0.46875 -0.578125 -1.328125 -0.578125q-0.84375 0 -1.3125 0.578125q-0.453125 0.578125 -0.453125 1.640625q0 1.0625 0.453125 1.65625q0.46875 0.578125 1.3125 0.578125zm8.687164 -5.25q0.21875 0 0.34375 0.140625q0.125 0.125 0.125 0.34375l0 5.078125q0 0.203125 -0.125 0.34375q-0.125 0.125 -0.34375 0.125q-0.21875 0 -0.34375 -0.125q-0.125 -0.125 -0.125 -0.328125l0 -0.609375q-0.28125 0.53125 -0.78125 0.8125q-0.5 0.265625 -1.125 0.265625q-1.03125 0 -1.5625 -0.578125q-0.53125 -0.578125 -0.53125 -1.71875l0 -3.265625q0 -0.21875 0.125 -0.34375q0.140625 -0.140625 0.375 -0.140625q0.21875 0 0.34375 0.140625q0.125 0.125 0.125 0.34375l0 3.234375q0 0.78125 0.3125 1.15625q0.3125 0.359375 0.984375 0.359375q0.765625 0 1.234375 -0.5q0.46875 -0.5 0.46875 -1.3125l0 -2.9375q0 -0.21875 0.125 -0.34375q0.140625 -0.140625 0.375 -0.140625zm4.8726807 -1.71875q-0.640625 0.046875 -0.96875 0.40625q-0.3125 0.34375 -0.3125 1.046875l0 0.390625l1.328125 0q0.203125 0 0.3125 0.109375q0.109375 0.109375 0.109375 0.28125q0 0.1875 -0.109375 0.28125q-0.109375 0.09375 -0.3125 0.09375l-1.328125 0l0 4.65625q0 0.234375 -0.140625 0.359375q-0.140625 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.140625 -0.125 -0.140625 -0.359375l0 -4.65625l-0.796875 0q-0.203125 0 -0.328125 -0.09375q-0.109375 -0.109375 -0.109375 -0.28125q0 -0.171875 0.109375 -0.28125q0.125 -0.109375 0.328125 -0.109375l0.796875 0l0 -0.21875q0 -1.078125 0.53125 -1.6875q0.546875 -0.625 1.5625 -0.703125l0.3125 -0.015625q0.3125 -0.03125 0.453125 0.0625q0.140625 0.078125 0.140625 0.296875q0 0.34375 -0.421875 0.390625l-0.3125 0.03125zm3.9360352 0q-0.640625 0.046875 -0.96875 0.40625q-0.3125 0.34375 -0.3125 1.046875l0 0.390625l1.328125 0q0.203125 0 0.3125 0.109375q0.109375 0.109375 0.109375 0.28125q0 0.1875 -0.109375 0.28125q-0.109375 0.09375 -0.3125 0.09375l-1.328125 0l0 4.65625q0 0.234375 -0.140625 0.359375q-0.140625 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.140625 -0.125 -0.140625 -0.359375l0 -4.65625l-0.796875 0q-0.203125 0 -0.328125 -0.09375q-0.109375 -0.109375 -0.109375 -0.28125q0 -0.171875 0.109375 -0.28125q0.125 -0.109375 0.328125 -0.109375l0.796875 0l0 -0.21875q0 -1.078125 0.53125 -1.6875q0.546875 -0.625 1.5625 -0.703125l0.3125 -0.015625q0.3125 -0.03125 0.453125 0.0625q0.140625 0.078125 0.140625 0.296875q0 0.34375 -0.421875 0.390625l-0.3125 0.03125zm5.873535 6.328125q0.140625 0 0.234375 0.109375q0.09375 0.109375 0.09375 0.28125q0 0.296875 -0.421875 0.546875q-0.4375 0.25 -0.921875 0.375q-0.46875 0.125 -0.921875 0.125q-1.359375 0 -2.15625 -0.796875q-0.78125 -0.8125 -0.78125 -2.21875q0 -0.90625 0.34375 -1.59375q0.359375 -0.6875 0.984375 -1.0625q0.640625 -0.390625 1.4375 -0.390625q1.140625 0 1.8125 0.75q0.671875 0.734375 0.671875 2.0q0 0.25 -0.09375 0.359375q-0.09375 0.109375 -0.3125 0.109375l-3.859375 0q0.09375 2.0625 1.953125 2.0625q0.46875 0 0.796875 -0.125q0.34375 -0.125 0.71875 -0.34375q0.3125 -0.1875 0.421875 -0.1875zm-2.09375 -3.875q-0.765625 0 -1.234375 0.484375q-0.46875 0.484375 -0.546875 1.359375l3.390625 0q-0.015625 -0.890625 -0.4375 -1.359375q-0.421875 -0.484375 -1.171875 -0.484375zm6.7927856 -0.734375q0.5 -0.03125 0.5 0.40625q0 0.203125 -0.109375 0.3125q-0.109375 
0.109375 -0.375 0.140625l-0.359375 0.03125q-0.796875 0.078125 -1.1875 0.578125q-0.390625 0.484375 -0.390625 1.15625l0 2.921875q0 0.234375 -0.140625 0.359375q-0.125 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -5.0625q0 -0.234375 0.140625 -0.359375q0.140625 -0.125 0.34375 -0.125q0.1875 0 0.3125 0.125q0.140625 0.125 0.140625 0.34375l0 0.671875q0.25 -0.53125 0.71875 -0.796875q0.46875 -0.28125 1.0625 -0.328125l0.171875 -0.015625z" fill-rule="nonzero"/><path fill="#f4cccc" d="m413.02625 319.3983l87.49606 0l0 30.992126l-87.49606 0z" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m413.02625 319.3983l87.49606 0l0 30.992126l-87.49606 0z" fill-rule="evenodd"/><path fill="#000000" d="m443.6039 332.47687q-0.234375 0 -0.375 -0.140625q-0.140625 -0.140625 -0.140625 -0.359375l0 -7.1875l-2.578125 0q-0.21875 0 -0.34375 -0.109375q-0.109375 -0.109375 -0.109375 -0.3125q0 -0.203125 0.109375 -0.296875q0.125 -0.109375 0.34375 -0.109375l6.15625 0q0.21875 0 0.328125 0.109375q0.125 0.09375 0.125 0.296875q0 0.203125 -0.125 0.3125q-0.109375 0.109375 -0.328125 0.109375l-2.578125 0l0 7.1875q0 0.21875 -0.140625 0.359375q-0.125 0.140625 -0.34375 0.140625zm5.113556 0q-0.21875 0 -0.359375 -0.140625q-0.125 -0.140625 -0.125 -0.359375l0 -7.546875q0 -0.21875 0.125 -0.34375q0.140625 -0.125 0.375 -0.125l4.375 0q0.203125 0 0.328125 0.109375q0.125 0.09375 0.125 0.296875q0 0.203125 -0.125 0.3125q-0.125 0.109375 -0.328125 0.109375l-3.90625 0l0 2.90625l3.65625 0q0.21875 0 0.328125 0.109375q0.125 0.109375 0.125 0.3125q0 0.1875 -0.125 0.296875q-0.109375 0.109375 -0.328125 0.109375l-3.65625 0l0 3.453125q0 0.21875 -0.125 0.359375q-0.125 0.140625 -0.359375 0.140625zm6.6840515 -0.0625q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.328125l0 -7.5625q0 -0.234375 0.125 -0.359375q0.140625 -0.140625 0.359375 -0.140625q0.234375 0 0.359375 0.140625q0.140625 0.125 0.140625 0.359375l0 7.171875l3.875 0q0.21875 0 0.328125 0.109375q0.125 0.109375 0.125 0.3125q0 0.203125 -0.125 0.3125q-0.109375 0.109375 -0.328125 0.109375l-4.375 0zm6.3394165 0.0625q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -5.0625q0 -0.234375 0.125 -0.359375q0.140625 -0.125 0.359375 -0.125q0.21875 0 0.34375 0.125q0.140625 0.125 0.140625 0.359375l0 5.0625q0 0.234375 -0.140625 0.359375q-0.125 0.125 -0.34375 0.125zm0 -7.28125q-0.296875 0 -0.484375 -0.171875q-0.171875 -0.171875 -0.171875 -0.453125q0 -0.25 0.171875 -0.421875q0.1875 -0.171875 0.484375 -0.171875q0.28125 0 0.453125 0.171875q0.1875 0.171875 0.1875 0.421875q0 0.28125 -0.1875 0.453125q-0.171875 0.171875 -0.453125 0.171875zm4.987152 6.515625q0.421875 0.03125 0.421875 0.375q0 0.203125 -0.15625 0.3125q-0.140625 0.09375 -0.4375 0.078125l-0.328125 -0.03125q-0.953125 -0.0625 -1.421875 -0.5625q-0.453125 -0.515625 -0.453125 -1.53125l0 -3.015625l-0.796875 0q-0.203125 0 -0.328125 -0.09375q-0.109375 -0.109375 -0.109375 -0.28125q0 -0.171875 0.109375 -0.28125q0.125 -0.109375 0.328125 -0.109375l0.796875 0l0 -1.359375q0 -0.21875 0.125 -0.34375q0.140625 -0.140625 0.375 -0.140625q0.21875 0 0.34375 0.140625q0.140625 0.125 0.140625 0.34375l0 1.359375l1.328125 0q0.1875 0 0.296875 0.109375q0.125 0.109375 0.125 0.28125q0 0.171875 -0.125 0.28125q-0.109375 0.09375 -0.296875 0.09375l-1.328125 0l0 3.0625q0 0.65625 0.265625 0.953125q0.265625 0.296875 0.8125 0.328125l0.3125 0.03125zm5.908142 -0.65625q0.140625 0 0.234375 0.109375q0.09375 0.109375 0.09375 0.28125q0 0.296875 -0.421875 0.546875q-0.4375 0.25 -0.921875 0.375q-0.46875 0.125 
-0.921875 0.125q-1.359375 0 -2.15625 -0.796875q-0.78125 -0.8125 -0.78125 -2.21875q0 -0.90625 0.34375 -1.59375q0.359375 -0.6875 0.984375 -1.0625q0.640625 -0.390625 1.4375 -0.390625q1.140625 0 1.8125 0.75q0.671875 0.734375 0.671875 2.0q0 0.25 -0.09375 0.359375q-0.09375 0.109375 -0.3125 0.109375l-3.859375 0q0.09375 2.0625 1.953125 2.0625q0.46875 0 0.796875 -0.125q0.34375 -0.125 0.71875 -0.34375q0.3125 -0.1875 0.421875 -0.1875zm-2.09375 -3.875q-0.765625 0 -1.234375 0.484375q-0.46875 0.484375 -0.546875 1.359375l3.390625 0q-0.015625 -0.890625 -0.4375 -1.359375q-0.421875 -0.484375 -1.171875 -0.484375z" fill-rule="nonzero"/><path fill="#000000" d="m429.9527 346.47687q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -5.0625q0 -0.234375 0.125 -0.359375q0.140625 -0.125 0.359375 -0.125q0.21875 0 0.34375 0.125q0.140625 0.125 0.140625 0.359375l0 5.0625q0 0.234375 -0.140625 0.359375q-0.125 0.125 -0.34375 0.125zm0 -7.28125q-0.296875 0 -0.484375 -0.171875q-0.171875 -0.171875 -0.171875 -0.453125q0 -0.25 0.171875 -0.421875q0.1875 -0.171875 0.484375 -0.171875q0.28125 0 0.453125 0.171875q0.1875 0.171875 0.1875 0.421875q0 0.28125 -0.1875 0.453125q-0.171875 0.171875 -0.453125 0.171875zm5.237152 1.234375q2.09375 0 2.09375 2.3125l0 3.25q0 0.234375 -0.125 0.359375q-0.125 0.125 -0.359375 0.125q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -3.1875q0 -0.8125 -0.328125 -1.1875q-0.3125 -0.375 -1.0 -0.375q-0.8125 0 -1.296875 0.5q-0.46875 0.484375 -0.46875 1.328125l0 2.921875q0 0.234375 -0.125 0.359375q-0.125 0.125 -0.359375 0.125q-0.234375 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -5.0625q0 -0.21875 0.125 -0.34375q0.125 -0.140625 0.359375 -0.140625q0.21875 0 0.34375 0.140625q0.125 0.125 0.125 0.328125l0 0.609375q0.28125 -0.53125 0.796875 -0.8125q0.53125 -0.28125 1.1875 -0.28125zm6.56604 5.28125q0.421875 0.03125 0.421875 0.375q0 0.203125 -0.15625 0.3125q-0.140625 0.09375 -0.4375 0.078125l-0.328125 -0.03125q-0.953125 -0.0625 -1.421875 -0.5625q-0.453125 -0.515625 -0.453125 -1.53125l0 -3.015625l-0.796875 0q-0.203125 0 -0.328125 -0.09375q-0.109375 -0.109375 -0.109375 -0.28125q0 -0.171875 0.109375 -0.28125q0.125 -0.109375 0.328125 -0.109375l0.796875 0l0 -1.359375q0 -0.21875 0.125 -0.34375q0.140625 -0.140625 0.375 -0.140625q0.21875 0 0.34375 0.140625q0.140625 0.125 0.140625 0.34375l0 1.359375l1.328125 0q0.1875 0 0.296875 0.109375q0.125 0.109375 0.125 0.28125q0 0.171875 -0.125 0.28125q-0.109375 0.09375 -0.296875 0.09375l-1.328125 0l0 3.0625q0 0.65625 0.265625 0.953125q0.265625 0.296875 0.8125 0.328125l0.3125 0.03125zm5.9081726 -0.65625q0.140625 0 0.234375 0.109375q0.09375 0.109375 0.09375 0.28125q0 0.296875 -0.421875 0.546875q-0.4375 0.25 -0.921875 0.375q-0.46875 0.125 -0.921875 0.125q-1.359375 0 -2.15625 -0.796875q-0.78125 -0.8125 -0.78125 -2.21875q0 -0.90625 0.34375 -1.59375q0.359375 -0.6875 0.984375 -1.0625q0.640625 -0.390625 1.4375 -0.390625q1.140625 0 1.8125 0.75q0.671875 0.734375 0.671875 2.0q0 0.25 -0.09375 0.359375q-0.09375 0.109375 -0.3125 0.109375l-3.859375 0q0.09375 2.0625 1.953125 2.0625q0.46875 0 0.796875 -0.125q0.34375 -0.125 0.71875 -0.34375q0.3125 -0.1875 0.421875 -0.1875zm-2.09375 -3.875q-0.765625 0 -1.234375 0.484375q-0.46875 0.484375 -0.546875 1.359375l3.390625 0q-0.015625 -0.890625 -0.4375 -1.359375q-0.421875 -0.484375 -1.171875 -0.484375zm6.7927856 -0.734375q0.5 -0.03125 0.5 0.40625q0 0.203125 -0.109375 0.3125q-0.109375 0.109375 -0.375 0.140625l-0.359375 0.03125q-0.796875 0.078125 -1.1875 0.578125q-0.390625 0.484375 -0.390625 1.15625l0 2.921875q0 0.234375 -0.140625 
0.359375q-0.125 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -5.0625q0 -0.234375 0.140625 -0.359375q0.140625 -0.125 0.34375 -0.125q0.1875 0 0.3125 0.125q0.140625 0.125 0.140625 0.34375l0 0.671875q0.25 -0.53125 0.71875 -0.796875q0.46875 -0.28125 1.0625 -0.328125l0.171875 -0.015625zm4.282898 -0.015625q0.765625 0 1.34375 0.390625q0.59375 0.375 0.921875 1.0625q0.328125 0.6875 0.328125 1.609375q0 0.90625 -0.328125 1.59375q-0.328125 0.671875 -0.90625 1.046875q-0.578125 0.359375 -1.359375 0.359375q-0.6875 0 -1.203125 -0.296875q-0.5 -0.296875 -0.765625 -0.84375l0 2.8125q0 0.21875 -0.125 0.34375q-0.125 0.125 -0.359375 0.125q-0.234375 0 -0.359375 -0.140625q-0.125 -0.125 -0.125 -0.328125l0 -7.234375q0 -0.21875 0.125 -0.34375q0.125 -0.140625 0.359375 -0.140625q0.234375 0 0.359375 0.140625q0.125 0.125 0.125 0.34375l0 0.640625q0.265625 -0.546875 0.765625 -0.84375q0.515625 -0.296875 1.203125 -0.296875zm-0.203125 5.265625q0.859375 0 1.328125 -0.578125q0.46875 -0.578125 0.46875 -1.625q0 -1.0625 -0.46875 -1.65625q-0.46875 -0.59375 -1.328125 -0.59375q-0.84375 0 -1.3125 0.578125q-0.453125 0.578125 -0.453125 1.640625q0 1.0625 0.453125 1.65625q0.46875 0.578125 1.3125 0.578125zm7.14032 -5.25q0.5 -0.03125 0.5 0.40625q0 0.203125 -0.109375 0.3125q-0.109375 0.109375 -0.375 0.140625l-0.359375 0.03125q-0.796875 0.078125 -1.1875 0.578125q-0.390625 0.484375 -0.390625 1.15625l0 2.921875q0 0.234375 -0.140625 0.359375q-0.125 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -5.0625q0 -0.234375 0.140625 -0.359375q0.140625 -0.125 0.34375 -0.125q0.1875 0 0.3125 0.125q0.140625 0.125 0.140625 0.34375l0 0.671875q0.25 -0.53125 0.71875 -0.796875q0.46875 -0.28125 1.0625 -0.328125l0.171875 -0.015625zm5.861023 4.609375q0.140625 0 0.234375 0.109375q0.09375 0.109375 0.09375 0.28125q0 0.296875 -0.421875 0.546875q-0.4375 0.25 -0.921875 0.375q-0.46875 0.125 -0.921875 0.125q-1.359375 0 -2.15625 -0.796875q-0.78125 -0.8125 -0.78125 -2.21875q0 -0.90625 0.34375 -1.59375q0.359375 -0.6875 0.984375 -1.0625q0.640625 -0.390625 1.4375 -0.390625q1.140625 0 1.8125 0.75q0.671875 0.734375 0.671875 2.0q0 0.25 -0.09375 0.359375q-0.09375 0.109375 -0.3125 0.109375l-3.859375 0q0.09375 2.0625 1.953125 2.0625q0.46875 0 0.796875 -0.125q0.34375 -0.125 0.71875 -0.34375q0.3125 -0.1875 0.421875 -0.1875zm-2.09375 -3.875q-0.765625 0 -1.234375 0.484375q-0.46875 0.484375 -0.546875 1.359375l3.390625 0q-0.015625 -0.890625 -0.4375 -1.359375q-0.421875 -0.484375 -1.171875 -0.484375zm6.5896606 4.53125q0.421875 0.03125 0.421875 0.375q0 0.203125 -0.15625 0.3125q-0.140625 0.09375 -0.4375 0.078125l-0.328125 -0.03125q-0.953125 -0.0625 -1.421875 -0.5625q-0.453125 -0.515625 -0.453125 -1.53125l0 -3.015625l-0.796875 0q-0.203125 0 -0.328125 -0.09375q-0.109375 -0.109375 -0.109375 -0.28125q0 -0.171875 0.109375 -0.28125q0.125 -0.109375 0.328125 -0.109375l0.796875 0l0 -1.359375q0 -0.21875 0.125 -0.34375q0.140625 -0.140625 0.375 -0.140625q0.21875 0 0.34375 0.140625q0.140625 0.125 0.140625 0.34375l0 1.359375l1.328125 0q0.1875 0 0.296875 0.109375q0.125 0.109375 0.125 0.28125q0 0.171875 -0.125 0.28125q-0.109375 0.09375 -0.296875 0.09375l-1.328125 0l0 3.0625q0 0.65625 0.265625 0.953125q0.265625 0.296875 0.8125 0.328125l0.3125 0.03125zm5.9081726 -0.65625q0.140625 0 0.234375 0.109375q0.09375 0.109375 0.09375 0.28125q0 0.296875 -0.421875 0.546875q-0.4375 0.25 -0.921875 0.375q-0.46875 0.125 -0.921875 0.125q-1.359375 0 -2.15625 -0.796875q-0.78125 -0.8125 -0.78125 -2.21875q0 -0.90625 0.34375 -1.59375q0.359375 -0.6875 0.984375 
-1.0625q0.640625 -0.390625 1.4375 -0.390625q1.140625 0 1.8125 0.75q0.671875 0.734375 0.671875 2.0q0 0.25 -0.09375 0.359375q-0.09375 0.109375 -0.3125 0.109375l-3.859375 0q0.09375 2.0625 1.953125 2.0625q0.46875 0 0.796875 -0.125q0.34375 -0.125 0.71875 -0.34375q0.3125 -0.1875 0.421875 -0.1875zm-2.09375 -3.875q-0.765625 0 -1.234375 0.484375q-0.46875 0.484375 -0.546875 1.359375l3.390625 0q-0.015625 -0.890625 -0.4375 -1.359375q-0.421875 -0.484375 -1.171875 -0.484375zm6.7927856 -0.734375q0.5 -0.03125 0.5 0.40625q0 0.203125 -0.109375 0.3125q-0.109375 0.109375 -0.375 0.140625l-0.359375 0.03125q-0.796875 0.078125 -1.1875 0.578125q-0.390625 0.484375 -0.390625 1.15625l0 2.921875q0 0.234375 -0.140625 0.359375q-0.125 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -5.0625q0 -0.234375 0.140625 -0.359375q0.140625 -0.125 0.34375 -0.125q0.1875 0 0.3125 0.125q0.140625 0.125 0.140625 0.34375l0 0.671875q0.25 -0.53125 0.71875 -0.796875q0.46875 -0.28125 1.0625 -0.328125l0.171875 -0.015625z" fill-rule="nonzero"/><path fill="#000000" fill-opacity="0.0" d="m371.61902 334.89435l41.417297 0" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m371.61902 334.89435l37.990234 0" fill-rule="evenodd"/><path fill="#000000" stroke="#000000" stroke-width="1.0" stroke-linecap="butt" d="m409.60925 334.89435l-1.1245728 1.1246033l3.0897522 -1.1246033l-3.0897522 -1.1245728z" fill-rule="evenodd"/><path fill="#c9daf8" d="m548.5407 277.52954l87.49603 0l0 30.992126l-87.49603 0z" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m548.5407 277.52954l87.49603 0l0 30.992126l-87.49603 0z" fill-rule="evenodd"/><path fill="#000000" d="m587.0588 293.13934q0.1875 0 0.296875 0.109375q0.109375 0.109375 0.109375 0.296875l0 2.984375q0 0.296875 -0.09375 0.4375q-0.078125 0.140625 -0.328125 0.234375q-0.46875 0.203125 -1.15625 0.328125q-0.6875 0.109375 -1.375 0.109375q-1.25 0 -2.171875 -0.515625q-0.90625 -0.515625 -1.390625 -1.484375q-0.484375 -0.96875 -0.484375 -2.328125q0 -1.328125 0.46875 -2.296875q0.484375 -0.984375 1.375 -1.5q0.90625 -0.53125 2.125 -0.53125q0.84375 0 1.5625 0.265625q0.71875 0.25 1.203125 0.734375q0.21875 0.203125 0.21875 0.421875q0 0.171875 -0.109375 0.296875q-0.09375 0.125 -0.234375 0.125q-0.140625 0 -0.328125 -0.140625q-0.625 -0.484375 -1.140625 -0.671875q-0.5 -0.1875 -1.15625 -0.1875q-1.4375 0 -2.203125 0.90625q-0.75 0.890625 -0.75 2.578125q0 1.71875 0.765625 2.609375q0.78125 0.890625 2.28125 0.890625q1.109375 0 2.03125 -0.328125l0 -2.578125l-1.75 0q-0.203125 0 -0.328125 -0.109375q-0.125 -0.109375 -0.125 -0.265625q0 -0.1875 0.125 -0.28125q0.125 -0.109375 0.328125 -0.109375l2.234375 0zm2.8911743 4.46875q-0.21875 0 -0.359375 -0.140625q-0.125 -0.140625 -0.125 -0.359375l0 -7.546875q0 -0.21875 0.125 -0.34375q0.140625 -0.125 0.375 -0.125l2.84375 0q1.328125 0 2.0625 0.65625q0.75 0.640625 0.75 1.828125q0 1.1875 -0.75 1.84375q-0.734375 0.65625 -2.0625 0.65625l-2.359375 0l0 3.03125q0 0.21875 -0.140625 0.359375q-0.125 0.140625 -0.359375 0.140625zm2.765625 -4.34375q1.9375 0 1.9375 -1.6875q0 -1.671875 -1.9375 -1.671875l-2.265625 0l0 3.359375l2.265625 0zm7.7869263 4.375q-1.65625 0 -2.515625 -0.859375q-0.84375 -0.859375 -0.84375 -2.546875l0 -4.703125q0 -0.234375 0.125 -0.359375q0.140625 -0.140625 0.359375 -0.140625q0.21875 0 0.34375 0.140625q0.140625 0.125 0.140625 0.359375l0 4.78125q0 1.25 0.609375 1.875q0.609375 0.609375 1.78125 0.609375q1.171875 0 1.765625 -0.609375q0.609375 -0.625 
0.609375 -1.875l0 -4.78125q0 -0.234375 0.140625 -0.359375q0.140625 -0.140625 0.359375 -0.140625q0.21875 0 0.34375 0.140625q0.140625 0.125 0.140625 0.359375l0 4.703125q0 1.671875 -0.859375 2.546875q-0.859375 0.859375 -2.5 0.859375z" fill-rule="nonzero"/><path fill="#c9daf8" d="m548.5407 319.3983l87.49603 0l0 30.992126l-87.49603 0z" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m548.5407 319.3983l87.49603 0l0 30.992126l-87.49603 0z" fill-rule="evenodd"/><path fill="#000000" d="m584.63763 339.50812q-1.1875 0 -2.0625 -0.515625q-0.875 -0.53125 -1.359375 -1.5q-0.46875 -0.984375 -0.46875 -2.3125q0 -1.328125 0.46875 -2.296875q0.484375 -0.984375 1.359375 -1.5q0.875 -0.53125 2.0625 -0.53125q0.8125 0 1.515625 0.265625q0.71875 0.25 1.25 0.734375q0.1875 0.1875 0.1875 0.421875q0 0.171875 -0.09375 0.296875q-0.09375 0.125 -0.21875 0.125q-0.15625 0 -0.359375 -0.140625q-0.609375 -0.46875 -1.109375 -0.65625q-0.5 -0.203125 -1.140625 -0.203125q-1.390625 0 -2.140625 0.90625q-0.75 0.90625 -0.75 2.578125q0 1.671875 0.75 2.578125q0.75 0.90625 2.140625 0.90625q0.640625 0 1.140625 -0.1875q0.5 -0.1875 1.109375 -0.671875q0.203125 -0.125 0.359375 -0.125q0.125 0 0.21875 0.125q0.09375 0.109375 0.09375 0.296875q0 0.234375 -0.1875 0.40625q-0.53125 0.484375 -1.25 0.75q-0.703125 0.25 -1.515625 0.25zm5.0302734 -0.03125q-0.21875 0 -0.359375 -0.140625q-0.125 -0.140625 -0.125 -0.359375l0 -7.546875q0 -0.21875 0.125 -0.34375q0.140625 -0.125 0.375 -0.125l2.84375 0q1.328125 0 2.0625 0.65625q0.75 0.640625 0.75 1.828125q0 1.1875 -0.75 1.84375q-0.734375 0.65625 -2.0625 0.65625l-2.359375 0l0 3.03125q0 0.21875 -0.140625 0.359375q-0.125 0.140625 -0.359375 0.140625zm2.765625 -4.34375q1.9375 0 1.9375 -1.6875q0 -1.671875 -1.9375 -1.671875l-2.265625 0l0 3.359375l2.265625 0zm7.7869263 4.375q-1.65625 0 -2.515625 -0.859375q-0.84375 -0.859375 -0.84375 -2.546875l0 -4.703125q0 -0.234375 0.125 -0.359375q0.140625 -0.140625 0.359375 -0.140625q0.21875 0 0.34375 0.140625q0.140625 0.125 0.140625 0.359375l0 4.78125q0 1.25 0.609375 1.875q0.609375 0.609375 1.78125 0.609375q1.171875 0 1.765625 -0.609375q0.609375 -0.625 0.609375 -1.875l0 -4.78125q0 -0.234375 0.140625 -0.359375q0.140625 -0.140625 0.359375 -0.140625q0.21875 0 0.34375 0.140625q0.140625 0.125 0.140625 0.359375l0 4.703125q0 1.671875 -0.859375 2.546875q-0.859375 0.859375 -2.5 0.859375z" fill-rule="nonzero"/><path fill="#000000" fill-opacity="0.0" d="m219.98688 334.92584l64.12598 -0.03149414" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m219.98688 334.92584l60.698914 -0.029815674" fill-rule="evenodd"/><path fill="#000000" stroke="#000000" stroke-width="1.0" stroke-linecap="butt" d="m280.68576 334.89603l-1.1240234 1.1251526l3.0892334 -1.1260986l-3.090332 -1.1230774z" fill-rule="evenodd"/><path fill="#d9ead3" d="m413.02625 141.28871l20.53543 0l0 20.53543l-20.53543 0z" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m413.02625 141.28871l20.53543 0l0 20.53543l-20.53543 0z" fill-rule="evenodd"/><path fill="#000000" fill-opacity="0.0" d="m437.52493 135.68242l73.763794 0l0 31.748032l-73.763794 0z" fill-rule="evenodd"/><path fill="#000000" d="m448.0718 156.20241q-0.234375 0 -0.375 -0.125q-0.125 -0.140625 -0.125 -0.359375l0 -7.5q0 -0.21875 0.125 -0.34375q0.140625 -0.125 0.375 -0.125l2.34375 0q2.03125 0 3.140625 1.09375q1.109375 1.09375 1.109375 3.125q0 2.03125 -1.125 3.140625q-1.109375 1.09375 -3.125 
1.09375l-2.34375 0zm2.28125 -0.84375q3.28125 0 3.28125 -3.390625q0 -3.390625 -3.28125 -3.390625l-1.796875 0l0 6.78125l1.796875 0zm8.3211975 -5.140625q2.203125 0 2.203125 2.296875l0 3.265625q0 0.21875 -0.125 0.359375q-0.125 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.125 -0.140625 -0.125 -0.359375l0 -0.578125q-0.21875 0.515625 -0.6875 0.796875q-0.46875 0.28125 -1.078125 0.28125q-0.5625 0 -1.046875 -0.21875q-0.46875 -0.234375 -0.75 -0.640625q-0.265625 -0.40625 -0.265625 -0.90625q0 -0.65625 0.328125 -1.015625q0.34375 -0.375 1.109375 -0.53125q0.765625 -0.15625 2.125 -0.15625l0.265625 0l0 -0.40625q0 -0.71875 -0.296875 -1.046875q-0.28125 -0.34375 -0.953125 -0.34375q-0.8125 0 -1.65625 0.453125q-0.3125 0.203125 -0.453125 0.203125q-0.140625 0 -0.234375 -0.109375q-0.09375 -0.109375 -0.09375 -0.28125q0 -0.171875 0.09375 -0.296875q0.109375 -0.125 0.328125 -0.25q0.421875 -0.25 0.953125 -0.375q0.546875 -0.140625 1.0625 -0.140625zm-0.390625 5.296875q0.71875 0 1.171875 -0.484375q0.46875 -0.484375 0.46875 -1.25l0 -0.34375l-0.21875 0q-1.046875 0 -1.609375 0.09375q-0.546875 0.078125 -0.78125 0.296875q-0.234375 0.203125 -0.234375 0.609375q0 0.46875 0.34375 0.78125q0.34375 0.296875 0.859375 0.296875zm7.0631714 -0.015625q0.421875 0.03125 0.421875 0.375q0 0.203125 -0.15625 0.3125q-0.140625 0.09375 -0.4375 0.078125l-0.328125 -0.03125q-0.953125 -0.0625 -1.421875 -0.5625q-0.453125 -0.515625 -0.453125 -1.53125l0 -3.015625l-0.796875 0q-0.203125 0 -0.328125 -0.09375q-0.109375 -0.109375 -0.109375 -0.28125q0 -0.171875 0.109375 -0.28125q0.125 -0.109375 0.328125 -0.109375l0.796875 0l0 -1.359375q0 -0.21875 0.125 -0.34375q0.140625 -0.140625 0.375 -0.140625q0.21875 0 0.34375 0.140625q0.140625 0.125 0.140625 0.34375l0 1.359375l1.328125 0q0.1875 0 0.296875 0.109375q0.125 0.109375 0.125 0.28125q0 0.171875 -0.125 0.28125q-0.109375 0.09375 -0.296875 0.09375l-1.328125 0l0 3.0625q0 0.65625 0.265625 0.953125q0.265625 0.296875 0.8125 0.328125l0.3125 0.03125zm3.767517 -5.28125q2.203125 0 2.203125 2.296875l0 3.265625q0 0.21875 -0.125 0.359375q-0.125 0.125 -0.34375 0.125q-0.21875 0 -0.359375 -0.125q-0.125 -0.140625 -0.125 -0.359375l0 -0.578125q-0.21875 0.515625 -0.6875 0.796875q-0.46875 0.28125 -1.078125 0.28125q-0.5625 0 -1.046875 -0.21875q-0.46875 -0.234375 -0.75 -0.640625q-0.265625 -0.40625 -0.265625 -0.90625q0 -0.65625 0.328125 -1.015625q0.34375 -0.375 1.109375 -0.53125q0.765625 -0.15625 2.125 -0.15625l0.265625 0l0 -0.40625q0 -0.71875 -0.296875 -1.046875q-0.28125 -0.34375 -0.953125 -0.34375q-0.8125 0 -1.65625 0.453125q-0.3125 0.203125 -0.453125 0.203125q-0.140625 0 -0.234375 -0.109375q-0.09375 -0.109375 -0.09375 -0.28125q0 -0.171875 0.09375 -0.296875q0.109375 -0.125 0.328125 -0.25q0.421875 -0.25 0.953125 -0.375q0.546875 -0.140625 1.0625 -0.140625zm-0.390625 5.296875q0.71875 0 1.171875 -0.484375q0.46875 -0.484375 0.46875 -1.25l0 -0.34375l-0.21875 0q-1.046875 0 -1.609375 0.09375q-0.546875 0.078125 -0.78125 0.296875q-0.234375 0.203125 -0.234375 0.609375q0 0.46875 0.34375 0.78125q0.34375 0.296875 0.859375 0.296875zm10.15921 0.75q-0.234375 0 -0.375 -0.140625q-0.140625 -0.140625 -0.140625 -0.359375l0 -7.1875l-2.578125 0q-0.21875 0 -0.34375 -0.109375q-0.109375 -0.109375 -0.109375 -0.3125q0 -0.203125 0.109375 -0.296875q0.125 -0.109375 0.34375 -0.109375l6.15625 0q0.21875 0 0.328125 0.109375q0.125 0.09375 0.125 0.296875q0 0.203125 -0.125 0.3125q-0.109375 0.109375 -0.328125 0.109375l-2.578125 0l0 7.1875q0 0.21875 -0.140625 0.359375q-0.125 0.140625 -0.34375 0.140625zm8.691681 -5.71875q0.140625 -0.296875 0.421875 -0.296875q0.1875 0 
0.328125 0.125q0.140625 0.109375 0.140625 0.296875q0 0.109375 -0.046875 0.1875l-3.375 7.28125q-0.0625 0.125 -0.171875 0.1875q-0.109375 0.078125 -0.234375 0.078125q-0.1875 0 -0.328125 -0.109375q-0.125 -0.109375 -0.125 -0.296875q0 -0.09375 0.046875 -0.1875l0.84375 -1.8125l-2.375 -5.140625q-0.046875 -0.078125 -0.046875 -0.171875q0 -0.1875 0.15625 -0.3125q0.15625 -0.140625 0.359375 -0.140625q0.109375 0 0.21875 0.078125q0.125 0.078125 0.1875 0.203125l2.0 4.5l2.0 -4.46875zm4.902405 -0.328125q0.765625 0 1.34375 0.390625q0.59375 0.375 0.921875 1.0625q0.328125 0.6875 0.328125 1.609375q0 0.90625 -0.328125 1.59375q-0.328125 0.671875 -0.90625 1.046875q-0.578125 0.359375 -1.359375 0.359375q-0.6875 0 -1.203125 -0.296875q-0.5 -0.296875 -0.765625 -0.84375l0 2.8125q0 0.21875 -0.125 0.34375q-0.125 0.125 -0.359375 0.125q-0.234375 0 -0.359375 -0.140625q-0.125 -0.125 -0.125 -0.328125l0 -7.234375q0 -0.21875 0.125 -0.34375q0.125 -0.140625 0.359375 -0.140625q0.234375 0 0.359375 0.140625q0.125 0.125 0.125 0.34375l0 0.640625q0.265625 -0.546875 0.765625 -0.84375q0.515625 -0.296875 1.203125 -0.296875zm-0.203125 5.265625q0.859375 0 1.328125 -0.578125q0.46875 -0.578125 0.46875 -1.625q0 -1.0625 -0.46875 -1.65625q-0.46875 -0.59375 -1.328125 -0.59375q-0.84375 0 -1.3125 0.578125q-0.453125 0.578125 -0.453125 1.640625q0 1.0625 0.453125 1.65625q0.46875 0.578125 1.3125 0.578125zm8.76532 -0.640625q0.140625 0 0.234375 0.109375q0.09375 0.109375 0.09375 0.28125q0 0.296875 -0.421875 0.546875q-0.4375 0.25 -0.921875 0.375q-0.46875 0.125 -0.921875 0.125q-1.359375 0 -2.15625 -0.796875q-0.78125 -0.8125 -0.78125 -2.21875q0 -0.90625 0.34375 -1.59375q0.359375 -0.6875 0.984375 -1.0625q0.640625 -0.390625 1.4375 -0.390625q1.140625 0 1.8125 0.75q0.671875 0.734375 0.671875 2.0q0 0.25 -0.09375 0.359375q-0.09375 0.109375 -0.3125 0.109375l-3.859375 0q0.09375 2.0625 1.953125 2.0625q0.46875 0 0.796875 -0.125q0.34375 -0.125 0.71875 -0.34375q0.3125 -0.1875 0.421875 -0.1875zm-2.09375 -3.875q-0.765625 0 -1.234375 0.484375q-0.46875 0.484375 -0.546875 1.359375l3.390625 0q-0.015625 -0.890625 -0.4375 -1.359375q-0.421875 -0.484375 -1.171875 -0.484375z" fill-rule="nonzero"/><path fill="#f4cccc" d="m519.9029 141.28871l20.5354 0l0 20.53543l-20.5354 0z" fill-rule="evenodd"/><path stroke="#000000" stroke-width="1.0" stroke-linejoin="round" stroke-linecap="butt" d="m519.9029 141.28871l20.5354 0l0 20.53543l-20.5354 0z" fill-rule="evenodd"/><path fill="#000000" fill-opacity="0.0" d="m544.40155 135.68242l100.0 0l0 31.748032l-100.0 0z" fill-rule="evenodd"/><path fill="#000000" d="m554.9328 156.26491q-0.21875 0 -0.359375 -0.140625q-0.125 -0.140625 -0.125 -0.359375l0 -7.578125q0 -0.234375 0.125 -0.359375q0.140625 -0.140625 0.359375 -0.140625q0.234375 0 0.359375 0.140625q0.140625 0.125 0.140625 0.359375l0 7.578125q0 0.21875 -0.140625 0.359375q-0.125 0.140625 -0.359375 0.140625zm5.3845215 -6.046875q2.09375 0 2.09375 2.3125l0 3.25q0 0.234375 -0.125 0.359375q-0.125 0.125 -0.359375 0.125q-0.21875 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -3.1875q0 -0.8125 -0.328125 -1.1875q-0.3125 -0.375 -1.0 -0.375q-0.8125 0 -1.296875 0.5q-0.46875 0.484375 -0.46875 1.328125l0 2.921875q0 0.234375 -0.125 0.359375q-0.125 0.125 -0.359375 0.125q-0.234375 0 -0.359375 -0.125q-0.125 -0.125 -0.125 -0.359375l0 -5.0625q0 -0.21875 0.125 -0.34375q0.125 -0.140625 0.359375 -0.140625q0.21875 0 0.34375 0.140625q0.125 0.125 0.125 0.328125l0 0.609375q0.28125 -0.53125 0.796875 -0.8125q0.53125 -0.28125 1.1875 -0.28125zm6.456726 -1.703125q-0.640625 0.046875 -0.96875 0.40625q-0.3125 0.34375 -0.3125 
[Embedded SVG diagram: flowchart boxes and arrows with text rendered as vector glyph paths; raw path data omitted as it is not human-readable.]
\ No newline at end of file
diff --git a/tensorflow/contrib/lite/tools/optimize/quantize_weights.cc b/tensorflow/contrib/lite/tools/optimize/quantize_weights.cc
index e5bb3c990a..692efb9029 100644
--- a/tensorflow/contrib/lite/tools/optimize/quantize_weights.cc
+++ b/tensorflow/contrib/lite/tools/optimize/quantize_weights.cc
@@ -168,11 +168,16 @@ std::vector<TensorInfo> GetQuantizableTensorsFromOperator(
bool eval_hybrid = use_hybrid_evaluation && IsHybridEvaluationOp(op, op_code);
- bool skipped_tensor = false;
std::vector<int32_t> op_input_indices = GetWeightInputIndices(op_code);
for (const int32_t op_input_idx : op_input_indices) {
int32_t tensor_idx = op->inputs[op_input_idx];
+ if (tensor_idx == -1) {
+ LOG(INFO) << "Skipping optional tensor input " << op_input_idx
+ << " of operation " << EnumNameBuiltinOperator(op_code);
+ continue;
+ }
+
TensorT* tensor = subgraph->tensors[tensor_idx].get();
// TODO(suharshs): Support shared weights, i.e. If two tensors share the
// same weight array, things may break. (i.e. SSD object detection)
@@ -180,14 +185,12 @@ std::vector<TensorInfo> GetQuantizableTensorsFromOperator(
CountTensorConsumers(model, subgraph, tensor_idx) != 1) {
LOG(INFO) << "Skipping quantization of tensor " << tensor->name
<< " that is shared between multiple multiple operations.";
- skipped_tensor = true;
continue;
}
if (tensor->type != TensorType_FLOAT32) {
LOG(INFO) << "Skipping quantization of tensor " << tensor->name
<< " that is not type float.";
- skipped_tensor = true;
continue;
}
@@ -196,7 +199,9 @@ std::vector<TensorInfo> GetQuantizableTensorsFromOperator(
LOG(INFO) << "Skipping quantization of tensor " << tensor->name
<< " because it has fewer than " << weights_min_num_elements
<< " elements (" << num_elements << ").";
- skipped_tensor = true;
+ // If one of the weights isn't quantized, then we cannot use the hybrid
+ // kernel for this operation, since it expects everything to be quantized.
+ eval_hybrid = false;
continue;
}
@@ -209,12 +214,6 @@ std::vector<TensorInfo> GetQuantizableTensorsFromOperator(
tensor_infos.push_back(tensor_info);
}
- // For hybrid operations we either need to quantize all tensors or none. So
- // if we skipped any tensors we need to return no quantized tensors.
- if (eval_hybrid && skipped_tensor) {
- return {};
- }
-
return tensor_infos;
}
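The change above drops the all-or-nothing `skipped_tensor` flag: optional inputs (tensor index -1) are now skipped outright, and only an under-sized weight disables hybrid evaluation for the op while the remaining weights stay quantizable. The Python sketch below only illustrates that decision flow; it is not the C++ implementation, and `TensorStub` and its fields are hypothetical stand-ins for the TFLite `TensorT` bookkeeping.

    from dataclasses import dataclass
    from typing import List, Optional, Tuple

    @dataclass
    class TensorStub:
      # Hypothetical stand-in for the per-tensor metadata consulted above.
      name: str
      is_float32: bool
      num_elements: int
      num_consumers: int

    def select_quantizable_tensors(
        inputs: List[Optional[TensorStub]],
        min_num_elements: int,
        eval_hybrid: bool) -> Tuple[List[TensorStub], bool]:
      selected = []
      for tensor in inputs:
        if tensor is None:               # optional input (tensor_idx == -1): skip it
          continue
        if tensor.num_consumers != 1:    # shared weights are still skipped
          continue
        if not tensor.is_float32:        # non-float weights are still skipped
          continue
        if tensor.num_elements < min_num_elements:
          # An unquantized weight rules out the hybrid kernel for this op,
          # but the remaining weights can still be quantized.
          eval_hybrid = False
          continue
        selected.append(tensor)
      return selected, eval_hybrid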
diff --git a/tensorflow/contrib/tpu/ops/cross_replica_ops.cc b/tensorflow/contrib/tpu/ops/cross_replica_ops.cc
index 9ee5ecb123..ea8e0e00ed 100644
--- a/tensorflow/contrib/tpu/ops/cross_replica_ops.cc
+++ b/tensorflow/contrib/tpu/ops/cross_replica_ops.cc
@@ -18,6 +18,89 @@ limitations under the License.
#include "tensorflow/core/framework/shape_inference.h"
namespace tensorflow {
+using shape_inference::DimensionHandle;
+using shape_inference::InferenceContext;
+using shape_inference::ShapeHandle;
+
+REGISTER_OP("AllToAll")
+ .Input("input: T")
+ .Input("group_assignment: int32")
+ .Output("output: T")
+ .Attr("T: {bfloat16, float}")
+ .Attr("concat_dimension: int")
+ .Attr("split_dimension: int")
+ .Attr("split_count: int")
+ .SetShapeFn([](InferenceContext* c) {
+ ShapeHandle input = c->input(0);
+ int64 rank;
+ if (c->RankKnown(input)) {
+ rank = c->Rank(input);
+ } else {
+ return errors::InvalidArgument("input's rank is unknown.");
+ }
+ int concat_dimension;
+ int split_dimension;
+
+ TF_RETURN_IF_ERROR(c->GetAttr("concat_dimension", &concat_dimension));
+
+ if (concat_dimension < 0 || concat_dimension >= rank) {
+ return errors::InvalidArgument("concat_dimension ", concat_dimension,
+ " is out of range of input rank ", rank);
+ }
+
+ TF_RETURN_IF_ERROR(c->GetAttr("split_dimension", &split_dimension));
+ if (split_dimension < 0 || split_dimension >= rank) {
+ return errors::InvalidArgument("split_dimension ", split_dimension,
+ " is out of range of input rank ", rank);
+ }
+
+ std::vector<DimensionHandle> dims;
+ dims.resize(rank);
+
+ for (int32 i = 0; i < rank; ++i) {
+ int64 in_idx = i;
+ if (i == concat_dimension) {
+ in_idx = split_dimension;
+ } else if (i == split_dimension) {
+ in_idx = concat_dimension;
+ }
+
+ dims[i] = c->Dim(input, in_idx);
+ }
+
+ c->set_output(0, c->MakeShape(dims));
+ return Status::OK();
+ })
+ .Doc(R"doc(
+An Op to exchange data across TPU replicas. On each replica, the input is
+split into `split_count` blocks along `split_dimension`, and the blocks are
+sent to the other replicas according to `group_assignment`. After receiving
+`split_count` - 1 blocks from the other replicas, we concatenate the blocks
+along `concat_dimension` as the output.
+
+For example, suppose there are 2 TPU replicas:
+replica 0 receives input: `[[A, B]]`
+replica 1 receives input: `[[C, D]]`
+
+group_assignment=`[[0, 1]]`
+concat_dimension=0
+split_dimension=1
+split_count=2
+
+replica 0's output: `[[A], [C]]`
+replica 1's output: `[[B], [D]]`
+
+input: The local input to the exchange.
+group_assignment: An int32 tensor with shape
+ [num_groups, num_replicas_per_group]. `group_assignment[i]` represents the
+ replica ids in the ith subgroup.
+concat_dimension: The dimension number to concatenate.
+split_dimension: The dimension number to split.
+split_count: The number of splits; this number must equal the sub-group
+ size (group_assignment.get_shape()[1]).
+output: The exchanged result.
+T: The type of elements to be exchanged.
+)doc");
REGISTER_OP("CrossReplicaSum")
.Input("input: T")
@@ -26,10 +109,8 @@ REGISTER_OP("CrossReplicaSum")
.Attr("T: {bfloat16, float}")
.SetShapeFn(shape_inference::UnchangedShape)
.Doc(R"doc(
-An Op to sum inputs across replicated TPU instances. Each
-instance supplies its own input. If group_assignment is empty, the output of
-each is the sum of all the inputs, otherwise the output of each is the sum of
-the inputs belonging to the same group.
+An Op to sum inputs across replicated TPU instances. Each instance supplies its
+own input.
For example, suppose there are 8 TPU instances: `[A, B, C, D, E, F, G, H]`.
Passing group_assignment=`[[0,2,4,6],[1,3,5,7]]` sets `A, C, E, G` as group 0,
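The worked example in the new `AllToAll` documentation can be reproduced with a small host-side sketch. The function below only mirrors the documented split/concatenate behaviour with NumPy for a single group of two replicas; it does not invoke the TPU op, and a real `AllToAll` additionally routes data according to `group_assignment`.

    import numpy as np

    def simulate_all_to_all(replica_inputs, concat_dim, split_dim, split_count):
      # Each replica splits its input into split_count blocks along split_dim;
      # replica i then receives block i from every replica and concatenates the
      # received blocks along concat_dim.
      splits = [np.split(x, split_count, axis=split_dim) for x in replica_inputs]
      return [
          np.concatenate([splits[r][i] for r in range(len(replica_inputs))],
                         axis=concat_dim)
          for i in range(split_count)
      ]

    replica0 = np.array([["A", "B"]])
    replica1 = np.array([["C", "D"]])
    out0, out1 = simulate_all_to_all([replica0, replica1],
                                     concat_dim=0, split_dim=1, split_count=2)
    # out0 == [["A"], ["C"]] and out1 == [["B"], ["D"]], matching the doc example.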
diff --git a/tensorflow/contrib/tpu/proto/optimization_parameters.proto b/tensorflow/contrib/tpu/proto/optimization_parameters.proto
index cbf6809257..fc1320501b 100644
--- a/tensorflow/contrib/tpu/proto/optimization_parameters.proto
+++ b/tensorflow/contrib/tpu/proto/optimization_parameters.proto
@@ -9,8 +9,8 @@ message ClippingLimits {
google.protobuf.FloatValue upper = 2; // +inf if not set
}
-// Get the learning rate from a <yet to be determined> source that can change
-// dynamically.
+// Get the learning rate from the parameters of the SendTPUEmbeddingGradients
+// op.
message DynamicLearningRate {
}
@@ -18,10 +18,8 @@ message DynamicLearningRate {
message LearningRate {
oneof learning_rate {
float constant = 1;
- // DynamicLearningRate dynamic = 2; -- disabled while code is being
- // rewritten.
+ DynamicLearningRate dynamic = 2;
}
- reserved 2;
}
message AdagradParameters {
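With `DynamicLearningRate dynamic = 2;` re-enabled, the `learning_rate` oneof can now carry either a constant rate or the dynamic variant whose value is taken from the parameters of the SendTPUEmbeddingGradients op. A minimal sketch of selecting the dynamic variant follows, assuming the generated Python bindings for this proto are importable as `optimization_parameters_pb2` (the module name is inferred from the .proto file path).

    # Assumed import path for the generated bindings of optimization_parameters.proto.
    from tensorflow.contrib.tpu.proto import optimization_parameters_pb2 as opt_pb

    lr = opt_pb.LearningRate()
    lr.dynamic.SetInParent()  # select the re-enabled DynamicLearningRate variant
    assert lr.WhichOneof("learning_rate") == "dynamic"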
diff --git a/tensorflow/contrib/tpu/python/ops/tpu_ops.py b/tensorflow/contrib/tpu/python/ops/tpu_ops.py
index 3ed571aff9..d92a0652bb 100644
--- a/tensorflow/contrib/tpu/python/ops/tpu_ops.py
+++ b/tensorflow/contrib/tpu/python/ops/tpu_ops.py
@@ -38,6 +38,62 @@ if platform.system() != "Windows":
_tpu_ops = loader.load_op_library(
resource_loader.get_path_to_datafile("_tpu_ops.so"))
+ def _create_default_group_assignment():
+ num_shards = tpu_function.get_tpu_context().number_of_shards
+ if num_shards is None:
+ logging.warning(
+ "cross_replica_sum should be used within a tpu_shard_context, but "
+ "got unset number_of_shards. Assuming 1.")
+ num_shards = 1
+ group_assignment = [list(range(num_shards))]
+ return group_assignment
+
+ def all_to_all(x,
+ concat_dimension,
+ split_dimension,
+ split_count,
+ group_assignment=None,
+ name=None):
+ """Exchange data across TPU replicas.
+
+ Args:
+ x: The local tensor.
+ concat_dimension: The dimension number to concatenate.
+ split_dimension: The dimension number to split.
+ split_count: The number of splits; this number must equal the sub-group
+ size (group_assignment.get_shape()[1]).
+ group_assignment: Optional 2d int32 lists with shape [num_groups,
+ num_replicas_per_group]. `group_assignment[i]` represents the replica
+ ids in the ith subgroup.
+ name: Optional op name.
+
+ Returns:
+ A `Tensor` containing the concatenated data from the different replicas.
+ """
+ if group_assignment is None:
+ group_assignment = _create_default_group_assignment()
+ return gen_tpu_ops.all_to_all(
+ x,
+ group_assignment,
+ concat_dimension=concat_dimension,
+ split_dimension=split_dimension,
+ split_count=split_count,
+ name=name)
+
+ @ops.RegisterGradient("AllToAll")
+ def _all_to_all_grad(op, grad):
+ # The gradient of an all-to-all is also an all-to-all, but with the
+ # split_dimension and concat_dimension swapped.
+ # The gradient with respect to group_assignment is None.
+ return [
+ gen_tpu_ops.all_to_all(
+ grad,
+ op.inputs[1],
+ concat_dimension=op.get_attr("split_dimension"),
+ split_dimension=op.get_attr("concat_dimension"),
+ split_count=op.get_attr("split_count")), None
+ ]
+
def cross_replica_sum(x, group_assignment=None, name=None):
"""Sum the input tensor accorss replicas according to group_assignment.
@@ -52,13 +108,7 @@ if platform.system() != "Windows":
A `Tensor` which is summed across replicas.
"""
if group_assignment is None:
- num_shards = tpu_function.get_tpu_context().number_of_shards
- if num_shards is None:
- logging.warning(
- "cross_replica_sum should be used within a tpu_shard_context, but "
- "got unset number_of_shards. Assuming 1.")
- num_shards = 1
- group_assignment = [list(range(num_shards))]
+ group_assignment = _create_default_group_assignment()
return gen_tpu_ops.cross_replica_sum(x, group_assignment, name=name)
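A minimal usage sketch of the new wrapper and the shared default group assignment, assuming the code runs inside a sharded TPU computation with two replicas so that `split_count` matches the group size; with `group_assignment=None`, both ops fall back to `_create_default_group_assignment()`, i.e. a single group containing every replica.

    from tensorflow.contrib.tpu.python.ops import tpu_ops

    def exchange(x):
      # One group of all replicas is assumed when group_assignment is omitted.
      summed = tpu_ops.cross_replica_sum(x)
      # split_count must equal the number of replicas per group (2 here).
      exchanged = tpu_ops.all_to_all(
          x, concat_dimension=0, split_dimension=1, split_count=2)
      return summed, exchanged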
diff --git a/tensorflow/contrib/tpu/python/tpu/keras_support.py b/tensorflow/contrib/tpu/python/tpu/keras_support.py
index dd7f8b678f..08e0465b71 100644
--- a/tensorflow/contrib/tpu/python/tpu/keras_support.py
+++ b/tensorflow/contrib/tpu/python/tpu/keras_support.py
@@ -1657,7 +1657,7 @@ class KerasTPUModel(models.Model):
'make sure your paths are correct and you have '
'permissions to read the files. Skipping validation')
- for step_index in range(steps_per_epoch - 1):
+ for step_index in range(steps_per_epoch):
batch_logs = {'batch': step_index, 'size': 1}
callbacks.on_batch_begin(step_index, batch_logs)
try:
diff --git a/tensorflow/contrib/tpu/python/tpu/keras_tpu_variables.py b/tensorflow/contrib/tpu/python/tpu/keras_tpu_variables.py
index a423aeace7..170977d8ab 100644
--- a/tensorflow/contrib/tpu/python/tpu/keras_tpu_variables.py
+++ b/tensorflow/contrib/tpu/python/tpu/keras_tpu_variables.py
@@ -30,7 +30,6 @@ from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import variable_scope
-from tensorflow.python.platform import tf_logging as logging
@contextlib.contextmanager
@@ -258,7 +257,6 @@ def replicated_scope(num_replicas):
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
kwargs["collections"] = []
- logging.info("Constructing replicated variable %s", name)
variables = []
index = {}
for i in range(num_replicas):
diff --git a/tensorflow/core/framework/dataset.cc b/tensorflow/core/framework/dataset.cc
index 9ffd8e1ee0..5281c56f04 100644
--- a/tensorflow/core/framework/dataset.cc
+++ b/tensorflow/core/framework/dataset.cc
@@ -19,6 +19,7 @@ limitations under the License.
#include "tensorflow/core/graph/node_builder.h"
namespace tensorflow {
+namespace data {
namespace {
@@ -329,4 +330,5 @@ void BackgroundWorker::WorkerLoop() {
}
}
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/framework/dataset.h b/tensorflow/core/framework/dataset.h
index 04865a1d4f..4e51fba048 100644
--- a/tensorflow/core/framework/dataset.h
+++ b/tensorflow/core/framework/dataset.h
@@ -40,6 +40,13 @@ limitations under the License.
namespace tensorflow {
+// Forward declarations to avoid introducing a dependency on headers in
+// "tensorflow/core/graph/...".
+class GraphDefBuilder;
+class Node;
+
+namespace data {
+
class DatasetBase;
class SerializationContext;
@@ -66,11 +73,6 @@ class IteratorStateWriter {
virtual ~IteratorStateWriter() {}
};
-// Forward declarations to avoid introducing a dependency on headers in
-// "tensorflow/core/graph/...".
-class GraphDefBuilder;
-class Node;
-
// Wrapper around GraphDefBuilder. Used to serialize Dataset graph.
class GraphDefBuilderWrapper {
public:
@@ -222,8 +224,7 @@ class GraphDefBuilderWrapper {
return (str_util::EndsWith(op_def->name(), "Dataset") &&
op_def->output_arg_size() == 1 &&
op_def->output_arg(0).type() == DT_VARIANT) ||
- dataset::WhitelistedStatefulOpRegistry::Global()->Contains(
- op_def->name());
+ WhitelistedStatefulOpRegistry::Global()->Contains(op_def->name());
}
bool HasAttr(const string& op_type_name, const string& attr_name) const;
@@ -751,6 +752,21 @@ class BackgroundWorker {
std::deque<std::function<void()>> work_queue_ GUARDED_BY(mu_);
};
+} // namespace data
+
+// TODO(b/114112161): Remove these aliases when all users have moved over to the
+// `tensorflow::data` namespace.
+using data::DatasetBase;
+using data::DatasetContext;
+using data::DatasetIterator;
+using data::DatasetOpKernel;
+using data::IteratorBase;
+using data::IteratorContext;
+using data::IteratorStateReader;
+using data::IteratorStateWriter;
+using data::SerializationContext;
+using data::UnaryDatasetOpKernel;
+
} // namespace tensorflow
#endif // TENSORFLOW_CORE_FRAMEWORK_DATASET_H_
diff --git a/tensorflow/core/framework/dataset_stateful_op_whitelist.h b/tensorflow/core/framework/dataset_stateful_op_whitelist.h
index 3b48999edb..21c21723d0 100644
--- a/tensorflow/core/framework/dataset_stateful_op_whitelist.h
+++ b/tensorflow/core/framework/dataset_stateful_op_whitelist.h
@@ -19,7 +19,7 @@ limitations under the License.
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
-namespace dataset {
+namespace data {
// Registry for stateful ops that need to be used in dataset functions.
// See below macro for usage details.
class WhitelistedStatefulOpRegistry {
@@ -47,7 +47,7 @@ class WhitelistedStatefulOpRegistry {
std::set<StringPiece> op_names_;
};
-} // namespace dataset
+} // namespace data
// Use this macro to whitelist an op that is marked stateful but needs to be
// used inside a map_fn in an input pipeline. This is only needed if you wish
@@ -67,10 +67,9 @@ class WhitelistedStatefulOpRegistry {
WHITELIST_STATEFUL_OP_FOR_DATASET_FUNCTIONS_UNIQ_HELPER(__COUNTER__, name)
#define WHITELIST_STATEFUL_OP_FOR_DATASET_FUNCTIONS_UNIQ_HELPER(ctr, name) \
WHITELIST_STATEFUL_OP_FOR_DATASET_FUNCTIONS_UNIQ(ctr, name)
-#define WHITELIST_STATEFUL_OP_FOR_DATASET_FUNCTIONS_UNIQ(ctr, name) \
- static ::tensorflow::Status whitelist_op##ctr TF_ATTRIBUTE_UNUSED = \
- ::tensorflow::dataset::WhitelistedStatefulOpRegistry::Global()->Add( \
- name)
+#define WHITELIST_STATEFUL_OP_FOR_DATASET_FUNCTIONS_UNIQ(ctr, name) \
+ static ::tensorflow::Status whitelist_op##ctr TF_ATTRIBUTE_UNUSED = \
+ ::tensorflow::data::WhitelistedStatefulOpRegistry::Global()->Add(name)
} // namespace tensorflow
diff --git a/tensorflow/core/framework/stats_aggregator.h b/tensorflow/core/framework/stats_aggregator.h
index 4a18efc940..af53ed0a3c 100644
--- a/tensorflow/core/framework/stats_aggregator.h
+++ b/tensorflow/core/framework/stats_aggregator.h
@@ -25,6 +25,8 @@ namespace tensorflow {
class Summary;
+namespace data {
+
// A `StatsAggregator` accumulates statistics incrementally. A
// `StatsAggregator` can accumulate multiple different statistics, distinguished
// by a string name.
@@ -87,6 +89,7 @@ class StatsAggregatorResource : public ResourceBase {
const std::shared_ptr<StatsAggregator> stats_aggregator_;
};
+} // namespace data
} // namespace tensorflow
#endif // TENSORFLOW_CORE_FRAMEWORK_STATS_AGGREGATOR_H_
diff --git a/tensorflow/core/grappler/graph_analyzer/graph_analyzer.h b/tensorflow/core/grappler/graph_analyzer/graph_analyzer.h
index 26d38a4931..97626346c7 100644
--- a/tensorflow/core/grappler/graph_analyzer/graph_analyzer.h
+++ b/tensorflow/core/grappler/graph_analyzer/graph_analyzer.h
@@ -138,7 +138,7 @@ class GraphAnalyzer {
// The entries are owned by collation_map_, so must be removed from
// ordered_collation_ before removing them from collation_map_.
struct ReverseLessByCount {
- bool operator()(CollationEntry* left, CollationEntry* right) {
+ bool operator()(CollationEntry* left, CollationEntry* right) const {
return left->count > right->count; // Reverse order.
}
};
diff --git a/tensorflow/core/kernels/data/batch_dataset_op.cc b/tensorflow/core/kernels/data/batch_dataset_op.cc
index f9b5353724..a25f78c6f1 100644
--- a/tensorflow/core/kernels/data/batch_dataset_op.cc
+++ b/tensorflow/core/kernels/data/batch_dataset_op.cc
@@ -18,7 +18,7 @@ limitations under the License.
#include "tensorflow/core/util/batch_util.h"
namespace tensorflow {
-
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level
@@ -241,5 +241,5 @@ REGISTER_KERNEL_BUILDER(Name("BatchDatasetV2").Device(DEVICE_CPU),
BatchDatasetOp);
} // namespace
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/cache_dataset_ops.cc b/tensorflow/core/kernels/data/cache_dataset_ops.cc
index 6ca0bcd37d..221b5ad835 100644
--- a/tensorflow/core/kernels/data/cache_dataset_ops.cc
+++ b/tensorflow/core/kernels/data/cache_dataset_ops.cc
@@ -20,7 +20,7 @@ limitations under the License.
#include "tensorflow/core/util/tensor_bundle/tensor_bundle.h"
namespace tensorflow {
-
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level description of
@@ -891,5 +891,5 @@ REGISTER_KERNEL_BUILDER(Name("CacheDataset").Device(DEVICE_CPU),
CacheDatasetOp);
} // namespace
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/captured_function.cc b/tensorflow/core/kernels/data/captured_function.cc
index 186740c2ac..ad2365b25b 100644
--- a/tensorflow/core/kernels/data/captured_function.cc
+++ b/tensorflow/core/kernels/data/captured_function.cc
@@ -23,6 +23,7 @@ limitations under the License.
#include "tensorflow/core/platform/notification.h"
namespace tensorflow {
+namespace data {
/* static */
Status CapturedFunction::Create(
@@ -418,4 +419,5 @@ CapturedFunction::CapturedFunction(const NameAttrList& func,
captured_inputs_(std::move(captured_inputs)),
use_inter_op_parallelism_(use_inter_op_parallelism) {}
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/captured_function.h b/tensorflow/core/kernels/data/captured_function.h
index ae6bdfc2a0..e44bc78b1c 100644
--- a/tensorflow/core/kernels/data/captured_function.h
+++ b/tensorflow/core/kernels/data/captured_function.h
@@ -32,6 +32,8 @@ class Device;
class OpKernelContext;
class ResourceMgr;
+namespace data {
+
// A `CapturedFunction` encapsulates a TensorFlow function and all of
// the runtime support required to execute it.
//
@@ -50,8 +52,8 @@ class CapturedFunction {
// Creates a new instance from a list of named attributes and captured inputs.
//
- // If `low_latency_hint` is true, the runtime may use an executor that is
- // optimized for small functions.
+ // If `use_inter_op_parallelism` is false, the runtime may use an executor
+ // that is optimized for small functions.
static Status Create(const NameAttrList& func,
std::vector<Tensor> captured_inputs,
bool use_inter_op_parallelism,
@@ -141,6 +143,12 @@ class CapturedFunction {
TF_DISALLOW_COPY_AND_ASSIGN(CapturedFunction);
};
+} // namespace data
+
+// TODO(b/114112161): Remove these aliases when all users have moved over to the
+// `tensorflow::data` namespace.
+using data::CapturedFunction;
+
} // namespace tensorflow
#endif // TENSORFLOW_CORE_KERNELS_DATA_CAPTURED_FUNCTION_H_
diff --git a/tensorflow/core/kernels/data/concatenate_dataset_op.cc b/tensorflow/core/kernels/data/concatenate_dataset_op.cc
index c361a9adcb..a04f150e71 100644
--- a/tensorflow/core/kernels/data/concatenate_dataset_op.cc
+++ b/tensorflow/core/kernels/data/concatenate_dataset_op.cc
@@ -17,7 +17,7 @@ limitations under the License.
#include "tensorflow/core/kernels/data/dataset.h"
namespace tensorflow {
-
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level
@@ -195,5 +195,5 @@ REGISTER_KERNEL_BUILDER(Name("ConcatenateDataset").Device(DEVICE_CPU),
ConcatenateDatasetOp);
} // namespace
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/dataset_ops.cc b/tensorflow/core/kernels/data/dataset_ops.cc
index c71d027f23..bd1ccd5b5d 100644
--- a/tensorflow/core/kernels/data/dataset_ops.cc
+++ b/tensorflow/core/kernels/data/dataset_ops.cc
@@ -19,6 +19,7 @@ limitations under the License.
#include "tensorflow/core/kernels/data/dataset.h"
namespace tensorflow {
+namespace data {
// See documentation in ../ops/dataset_ops.cc for a high-level
// description of the following op.
@@ -48,4 +49,5 @@ class DatasetToGraphOp : public OpKernel {
REGISTER_KERNEL_BUILDER(Name("DatasetToGraph").Device(DEVICE_CPU),
DatasetToGraphOp);
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/dataset_utils.cc b/tensorflow/core/kernels/data/dataset_utils.cc
index d85ef1cbab..e7ac368ae3 100644
--- a/tensorflow/core/kernels/data/dataset_utils.cc
+++ b/tensorflow/core/kernels/data/dataset_utils.cc
@@ -17,8 +17,7 @@ limitations under the License.
#include "tensorflow/core/common_runtime/device.h"
namespace tensorflow {
-
-namespace dataset {
+namespace data {
Status MakeIteratorFromInputElement(
IteratorContext* ctx, const std::vector<Tensor>& input_element,
@@ -45,6 +44,5 @@ Status MakeIteratorFromInputElement(
ctx, strings::StrCat(prefix, "[", thread_index, "]"), out_iterator);
}
-} // namespace dataset
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/dataset_utils.h b/tensorflow/core/kernels/data/dataset_utils.h
index 6c4191c2be..234856ea39 100644
--- a/tensorflow/core/kernels/data/dataset_utils.h
+++ b/tensorflow/core/kernels/data/dataset_utils.h
@@ -20,16 +20,14 @@ limitations under the License.
#include "tensorflow/core/kernels/data/dataset.h"
namespace tensorflow {
-
-namespace dataset {
+namespace data {
Status MakeIteratorFromInputElement(
IteratorContext* ctx, const std::vector<Tensor>& input_element,
int64 thread_index, CapturedFunction* captured_func, StringPiece prefix,
std::unique_ptr<IteratorBase>* out_iterator);
-} // namespace dataset
-
+} // namespace data
} // namespace tensorflow
#endif // TENSORFLOW_CORE_KERNELS_DATA_DATASET_UTILS_H_
diff --git a/tensorflow/core/kernels/data/dense_to_sparse_batch_dataset_op.cc b/tensorflow/core/kernels/data/dense_to_sparse_batch_dataset_op.cc
index 9770bc025d..237511a07d 100644
--- a/tensorflow/core/kernels/data/dense_to_sparse_batch_dataset_op.cc
+++ b/tensorflow/core/kernels/data/dense_to_sparse_batch_dataset_op.cc
@@ -18,7 +18,7 @@ limitations under the License.
#include "tensorflow/core/kernels/data/dataset.h"
namespace tensorflow {
-
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level
@@ -301,5 +301,5 @@ REGISTER_KERNEL_BUILDER(Name("DenseToSparseBatchDataset").Device(DEVICE_CPU),
DenseToSparseBatchDatasetOp);
} // namespace
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/filter_by_component_dataset_op.cc b/tensorflow/core/kernels/data/filter_by_component_dataset_op.cc
index ce577397c5..a7e3a56727 100644
--- a/tensorflow/core/kernels/data/filter_by_component_dataset_op.cc
+++ b/tensorflow/core/kernels/data/filter_by_component_dataset_op.cc
@@ -21,7 +21,7 @@ limitations under the License.
#include "tensorflow/core/lib/random/random.h"
namespace tensorflow {
-
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level
@@ -166,5 +166,5 @@ REGISTER_KERNEL_BUILDER(Name("FilterByLastComponentDataset").Device(DEVICE_CPU),
FilterByLastComponentDatasetOp);
} // namespace
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/filter_dataset_op.cc b/tensorflow/core/kernels/data/filter_dataset_op.cc
index bbce001eaf..bf0aecaf3c 100644
--- a/tensorflow/core/kernels/data/filter_dataset_op.cc
+++ b/tensorflow/core/kernels/data/filter_dataset_op.cc
@@ -21,7 +21,7 @@ limitations under the License.
#include "tensorflow/core/lib/random/random.h"
namespace tensorflow {
-
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level
@@ -280,5 +280,5 @@ REGISTER_KERNEL_BUILDER(Name("FilterDataset").Device(DEVICE_CPU),
FilterDatasetOp);
} // namespace
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/flat_map_dataset_op.cc b/tensorflow/core/kernels/data/flat_map_dataset_op.cc
index b1eb2fd849..e3c45ef86c 100644
--- a/tensorflow/core/kernels/data/flat_map_dataset_op.cc
+++ b/tensorflow/core/kernels/data/flat_map_dataset_op.cc
@@ -21,7 +21,7 @@ limitations under the License.
#include "tensorflow/core/lib/random/random.h"
namespace tensorflow {
-
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level
@@ -245,7 +245,7 @@ class FlatMapDatasetOp : public UnaryDatasetOpKernel {
private:
Status BuildCurrentElementIteratorLocked(IteratorContext* ctx)
EXCLUSIVE_LOCKS_REQUIRED(mu_) {
- return dataset::MakeIteratorFromInputElement(
+ return MakeIteratorFromInputElement(
ctx, captured_func_inputs_, element_index_++,
dataset()->captured_func_.get(), prefix(),
&current_element_iterator_);
@@ -285,5 +285,5 @@ REGISTER_KERNEL_BUILDER(Name("FlatMapDataset").Device(DEVICE_CPU),
FlatMapDatasetOp);
} // namespace
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/generator_dataset_op.cc b/tensorflow/core/kernels/data/generator_dataset_op.cc
index ccee690d7e..ac5cc1b2c1 100644
--- a/tensorflow/core/kernels/data/generator_dataset_op.cc
+++ b/tensorflow/core/kernels/data/generator_dataset_op.cc
@@ -23,6 +23,7 @@ limitations under the License.
#include "tensorflow/core/lib/random/random.h"
namespace tensorflow {
+namespace data {
// See documentation in ../ops/dataset_ops.cc for a high-level
// description of the following op.
@@ -188,10 +189,13 @@ void GeneratorDatasetOp::MakeDataset(OpKernelContext* ctx,
std::move(finalize_func), output_types_, output_shapes_);
}
+namespace {
REGISTER_KERNEL_BUILDER(Name("GeneratorDataset").Device(DEVICE_CPU),
GeneratorDatasetOp);
REGISTER_KERNEL_BUILDER(
Name("GeneratorDataset").Device(DEVICE_GPU).HostMemory("handle"),
GeneratorDatasetOp);
+} // namespace
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/generator_dataset_op.h b/tensorflow/core/kernels/data/generator_dataset_op.h
index 8407543136..d23ed97ec3 100644
--- a/tensorflow/core/kernels/data/generator_dataset_op.h
+++ b/tensorflow/core/kernels/data/generator_dataset_op.h
@@ -19,6 +19,7 @@ limitations under the License.
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
+namespace data {
class GeneratorDatasetOp : public DatasetOpKernel {
public:
@@ -36,5 +37,6 @@ class GeneratorDatasetOp : public DatasetOpKernel {
NameAttrList finalize_func_;
};
+} // namespace data
} // namespace tensorflow
#endif // TENSORFLOW_CORE_KERNELS_DATA_GENERATOR_DATASET_OP_H_
diff --git a/tensorflow/core/kernels/data/group_by_reducer_dataset_op.cc b/tensorflow/core/kernels/data/group_by_reducer_dataset_op.cc
index 130f04da3e..d6ee42a7c6 100644
--- a/tensorflow/core/kernels/data/group_by_reducer_dataset_op.cc
+++ b/tensorflow/core/kernels/data/group_by_reducer_dataset_op.cc
@@ -22,6 +22,7 @@ limitations under the License.
#include "tensorflow/core/lib/random/random.h"
namespace tensorflow {
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level
@@ -433,4 +434,5 @@ REGISTER_KERNEL_BUILDER(Name("GroupByReducerDataset").Device(DEVICE_CPU),
GroupByReducerDatasetOp);
} // namespace
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/group_by_window_dataset_op.cc b/tensorflow/core/kernels/data/group_by_window_dataset_op.cc
index 46a3185b49..e4fa557598 100644
--- a/tensorflow/core/kernels/data/group_by_window_dataset_op.cc
+++ b/tensorflow/core/kernels/data/group_by_window_dataset_op.cc
@@ -23,6 +23,7 @@ limitations under the License.
#include "tensorflow/core/lib/random/random.h"
namespace tensorflow {
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level
@@ -549,4 +550,5 @@ REGISTER_KERNEL_BUILDER(Name("GroupByWindowDataset").Device(DEVICE_CPU),
GroupByWindowDatasetOp);
} // namespace
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/interleave_dataset_op.cc b/tensorflow/core/kernels/data/interleave_dataset_op.cc
index 716e040277..0768f46665 100644
--- a/tensorflow/core/kernels/data/interleave_dataset_op.cc
+++ b/tensorflow/core/kernels/data/interleave_dataset_op.cc
@@ -21,7 +21,7 @@ limitations under the License.
#include "tensorflow/core/lib/random/random.h"
namespace tensorflow {
-
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level
@@ -201,7 +201,7 @@ class InterleaveDatasetOp : public UnaryDatasetOpKernel {
TF_RETURN_IF_ERROR(input_impl_->GetNext(
ctx, &args_list_[cycle_index_], &end_of_input_));
if (!end_of_input_) {
- TF_RETURN_IF_ERROR(dataset::MakeIteratorFromInputElement(
+ TF_RETURN_IF_ERROR(MakeIteratorFromInputElement(
ctx, args_list_[cycle_index_], cycle_index_,
dataset()->captured_func_.get(), prefix(),
&current_elements_[cycle_index_]));
@@ -288,7 +288,7 @@ class InterleaveDatasetOp : public UnaryDatasetOpKernel {
full_name(strings::StrCat("args_list_[", idx, "][", i, "]")),
&args_list_[idx][i]));
}
- TF_RETURN_IF_ERROR(dataset::MakeIteratorFromInputElement(
+ TF_RETURN_IF_ERROR(MakeIteratorFromInputElement(
ctx, args_list_[idx], idx, dataset()->captured_func_.get(),
prefix(), &current_elements_[idx]));
TF_RETURN_IF_ERROR(
@@ -330,5 +330,5 @@ REGISTER_KERNEL_BUILDER(Name("InterleaveDataset").Device(DEVICE_CPU),
InterleaveDatasetOp);
} // namespace
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/iterator_ops.cc b/tensorflow/core/kernels/data/iterator_ops.cc
index 4e9b280968..fe6d705eab 100644
--- a/tensorflow/core/kernels/data/iterator_ops.cc
+++ b/tensorflow/core/kernels/data/iterator_ops.cc
@@ -36,7 +36,7 @@ limitations under the License.
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
-
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level
@@ -236,6 +236,8 @@ class IteratorResource : public ResourceBase {
const std::vector<PartialTensorShape> output_shapes_;
};
+namespace {
+
// Helper class for reading data from a VariantTensorData object.
class VariantTensorDataReader : public IteratorStateReader {
public:
@@ -443,6 +445,8 @@ class IteratorStateVariant {
REGISTER_UNARY_VARIANT_DECODE_FUNCTION(IteratorStateVariant,
kIteratorVariantTypeName);
+} // namespace
+
// Note that IteratorHandleOp holds a reference to the resource it creates. If
// cleaning up resources with DestroyResourceOp is important, consider creating
// resource containers with AnonymousIteratorHandleOp instead.
@@ -622,6 +626,8 @@ void MakeIteratorOp::Compute(OpKernelContext* ctx) {
OP_REQUIRES_OK(ctx, iterator_resource->set_iterator(std::move(iterator)));
}
+namespace {
+
class ToSingleElementOp : public AsyncOpKernel {
public:
explicit ToSingleElementOp(OpKernelConstruction* ctx)
@@ -887,6 +893,8 @@ class OneShotIteratorOp : public AsyncOpKernel {
const int graph_def_version_;
};
+} // namespace
+
void IteratorGetNextOp::ComputeAsync(OpKernelContext* ctx, DoneCallback done) {
IteratorResource* iterator;
OP_REQUIRES_OK_ASYNC(
@@ -957,6 +965,8 @@ void IteratorGetNextSyncOp::Compute(OpKernelContext* ctx) {
}
}
+namespace {
+
class IteratorGetNextAsOptionalOp : public AsyncOpKernel {
public:
explicit IteratorGetNextAsOptionalOp(OpKernelConstruction* ctx)
@@ -1037,6 +1047,8 @@ class IteratorGetNextAsOptionalOp : public AsyncOpKernel {
std::vector<PartialTensorShape> output_shapes_;
};
+} // namespace
+
void IteratorToStringHandleOp::Compute(OpKernelContext* ctx) {
const Tensor& resource_handle_t = ctx->input(0);
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(resource_handle_t.shape()),
@@ -1108,6 +1120,8 @@ void IteratorFromStringHandleOp::Compute(OpKernelContext* ctx) {
resource_handle_t->scalar<ResourceHandle>()() = resource_handle;
}
+namespace {
+
class SerializeIteratorOp : public OpKernel {
public:
explicit SerializeIteratorOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
@@ -1202,4 +1216,7 @@ REGISTER_KERNEL_BUILDER(Name("SerializeIterator").Device(DEVICE_CPU),
REGISTER_KERNEL_BUILDER(Name("DeserializeIterator").Device(DEVICE_CPU),
DeserializeIteratorOp);
+} // namespace
+
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/iterator_ops.h b/tensorflow/core/kernels/data/iterator_ops.h
index 723564286c..8a2b2639a7 100644
--- a/tensorflow/core/kernels/data/iterator_ops.h
+++ b/tensorflow/core/kernels/data/iterator_ops.h
@@ -22,6 +22,7 @@ limitations under the License.
#include "tensorflow/core/kernels/ops_util.h"
namespace tensorflow {
+namespace data {
class IteratorResource;
@@ -142,6 +143,7 @@ class IteratorFromStringHandleOp : public OpKernel {
std::vector<PartialTensorShape> output_shapes_;
};
+} // namespace data
} // namespace tensorflow
#endif // TENSORFLOW_CORE_KERNELS_DATA_ITERATOR_OPS_H_
diff --git a/tensorflow/core/kernels/data/map_and_batch_dataset_op.cc b/tensorflow/core/kernels/data/map_and_batch_dataset_op.cc
index 8b0c9ad6b2..27c89b3661 100644
--- a/tensorflow/core/kernels/data/map_and_batch_dataset_op.cc
+++ b/tensorflow/core/kernels/data/map_and_batch_dataset_op.cc
@@ -29,7 +29,7 @@ limitations under the License.
#include "tensorflow/core/platform/tracing.h"
namespace tensorflow {
-
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level
@@ -675,5 +675,5 @@ REGISTER_KERNEL_BUILDER(Name("MapAndBatchDatasetV2").Device(DEVICE_CPU),
MapAndBatchDatasetOp);
} // namespace
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/map_dataset_op.cc b/tensorflow/core/kernels/data/map_dataset_op.cc
index 6c45fcafcc..306486b96a 100644
--- a/tensorflow/core/kernels/data/map_dataset_op.cc
+++ b/tensorflow/core/kernels/data/map_dataset_op.cc
@@ -20,7 +20,7 @@ limitations under the License.
#include "tensorflow/core/lib/random/random.h"
namespace tensorflow {
-
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level
@@ -196,5 +196,5 @@ class MapDatasetOp : public UnaryDatasetOpKernel {
REGISTER_KERNEL_BUILDER(Name("MapDataset").Device(DEVICE_CPU), MapDatasetOp);
} // namespace
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/map_defun_op.cc b/tensorflow/core/kernels/data/map_defun_op.cc
index 607d0ca028..3c562fc7f3 100644
--- a/tensorflow/core/kernels/data/map_defun_op.cc
+++ b/tensorflow/core/kernels/data/map_defun_op.cc
@@ -23,13 +23,13 @@ limitations under the License.
#include "tensorflow/core/util/reffed_status_callback.h"
namespace tensorflow {
+namespace data {
namespace {
void SetRunOptions(OpKernelContext* ctx, FunctionLibraryRuntime::Options* opts,
bool always_collect_stats) {
opts->step_id = ctx->step_id();
opts->rendezvous = ctx->rendezvous();
- opts->cancellation_manager = ctx->cancellation_manager();
if (always_collect_stats) {
opts->stats_collector = ctx->stats_collector();
}
@@ -117,10 +117,13 @@ class MapDefunOp : public AsyncOpKernel {
for (size_t i = 0; i < static_cast<size_t>(batch_size); ++i) {
auto* call_frame =
new MapFunctionCallFrame(*args, *arg_shapes, output, this, i);
+ CancellationManager* c_mgr = new CancellationManager;
+ opts_.cancellation_manager = c_mgr;
ctx->function_library()->Run(
opts_, func_handle_, call_frame,
- [call_frame, refcounted](const Status& func_status) {
+ [call_frame, refcounted, c_mgr](const Status& func_status) {
delete call_frame;
+ delete c_mgr;
refcounted->UpdateStatus(func_status);
refcounted->Unref();
});
@@ -189,8 +192,9 @@ class MapDefunOp : public AsyncOpKernel {
const OpKernel* kernel_;
const size_t iter_;
};
-}; // namespace
+};
REGISTER_KERNEL_BUILDER(Name("MapDefun").Device(DEVICE_CPU), MapDefunOp);
} // namespace
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/optimize_dataset_op.cc b/tensorflow/core/kernels/data/optimize_dataset_op.cc
index 6263dc3cf8..d5b725eac9 100644
--- a/tensorflow/core/kernels/data/optimize_dataset_op.cc
+++ b/tensorflow/core/kernels/data/optimize_dataset_op.cc
@@ -33,6 +33,7 @@ limitations under the License.
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level
@@ -270,4 +271,5 @@ REGISTER_KERNEL_BUILDER(Name("OptimizeDataset").Device(DEVICE_CPU),
OptimizeDatasetOp);
} // namespace
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/optional_ops.cc b/tensorflow/core/kernels/data/optional_ops.cc
index cfac45dbc7..b372d31a93 100644
--- a/tensorflow/core/kernels/data/optional_ops.cc
+++ b/tensorflow/core/kernels/data/optional_ops.cc
@@ -20,6 +20,7 @@ limitations under the License.
#include "tensorflow/core/framework/variant_op_registry.h"
namespace tensorflow {
+namespace data {
namespace {
const char kOptionalVariantTypeName[] = "tensorflow::data::Optional";
@@ -267,4 +268,5 @@ Status WriteOptionalNoneToOutput(OpKernelContext* ctx, int output_index) {
return Status::OK();
}
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/optional_ops.h b/tensorflow/core/kernels/data/optional_ops.h
index 6f25567678..2cbf2933f5 100644
--- a/tensorflow/core/kernels/data/optional_ops.h
+++ b/tensorflow/core/kernels/data/optional_ops.h
@@ -21,6 +21,7 @@ limitations under the License.
#include "tensorflow/core/framework/variant_tensor_data.h"
namespace tensorflow {
+namespace data {
// Stores a DT_VARIANT value representing an Optional with the given value
// in the `output_index`^th output of the given kernel execution context.
@@ -31,6 +32,7 @@ Status WriteOptionalWithValueToOutput(OpKernelContext* ctx, int output_index,
// in the `output_index`^th output of the given kernel execution context.
Status WriteOptionalNoneToOutput(OpKernelContext* ctx, int output_index);
+} // namespace data
} // namespace tensorflow
#endif // TENSORFLOW_CORE_KERNELS_DATA_OPTIONAL_OPS_H_
diff --git a/tensorflow/core/kernels/data/padded_batch_dataset_op.cc b/tensorflow/core/kernels/data/padded_batch_dataset_op.cc
index be45eac46e..fd0e6c4cd0 100644
--- a/tensorflow/core/kernels/data/padded_batch_dataset_op.cc
+++ b/tensorflow/core/kernels/data/padded_batch_dataset_op.cc
@@ -19,7 +19,7 @@ limitations under the License.
#include "tensorflow/core/util/batch_util.h"
namespace tensorflow {
-
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level
@@ -382,5 +382,5 @@ REGISTER_KERNEL_BUILDER(Name("PaddedBatchDatasetV2").Device(DEVICE_CPU),
PaddedBatchDatasetOp);
} // namespace
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/parallel_interleave_dataset_op.cc b/tensorflow/core/kernels/data/parallel_interleave_dataset_op.cc
index f6b3fd97e3..f8287cf0e3 100644
--- a/tensorflow/core/kernels/data/parallel_interleave_dataset_op.cc
+++ b/tensorflow/core/kernels/data/parallel_interleave_dataset_op.cc
@@ -25,7 +25,7 @@ limitations under the License.
#include "tensorflow/core/lib/random/random.h"
namespace tensorflow {
-
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level
@@ -684,7 +684,7 @@ class ParallelInterleaveDatasetOp : public UnaryDatasetOpKernel {
{
tf_shared_lock l(ckpt_mu_);
worker_thread_states_[thread_index].iterator_creation_status =
- dataset::MakeIteratorFromInputElement(
+ MakeIteratorFromInputElement(
ctx.get(), worker_thread_states_[thread_index].input,
thread_index, dataset()->captured_func_.get(), prefix(),
&worker_thread_states_[thread_index].iterator);
@@ -914,7 +914,7 @@ class ParallelInterleaveDatasetOp : public UnaryDatasetOpKernel {
worker_thread_states_[index].iterator.reset();
} else {
std::unique_ptr<IteratorBase> iterator;
- Status s = dataset::MakeIteratorFromInputElement(
+ Status s = MakeIteratorFromInputElement(
ctx, worker_thread_states_[index].input, index,
dataset()->captured_func_.get(), prefix(), &iterator);
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, iterator));
@@ -1068,5 +1068,5 @@ REGISTER_KERNEL_BUILDER(Name("ParallelInterleaveDataset").Device(DEVICE_CPU),
ParallelInterleaveDatasetOp);
} // namespace
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/parallel_map_dataset_op.cc b/tensorflow/core/kernels/data/parallel_map_dataset_op.cc
index bff54813d6..ac5ed286ee 100644
--- a/tensorflow/core/kernels/data/parallel_map_dataset_op.cc
+++ b/tensorflow/core/kernels/data/parallel_map_dataset_op.cc
@@ -24,7 +24,7 @@ limitations under the License.
#include "tensorflow/core/lib/random/random.h"
namespace tensorflow {
-
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level
@@ -180,5 +180,5 @@ REGISTER_KERNEL_BUILDER(Name("ParallelMapDataset").Device(DEVICE_CPU),
ParallelMapDatasetOp);
} // namespace
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/parallel_map_iterator.cc b/tensorflow/core/kernels/data/parallel_map_iterator.cc
index 61f8139b9e..4ae742aaaf 100644
--- a/tensorflow/core/kernels/data/parallel_map_iterator.cc
+++ b/tensorflow/core/kernels/data/parallel_map_iterator.cc
@@ -20,6 +20,7 @@ limitations under the License.
#include <vector>
namespace tensorflow {
+namespace data {
namespace {
class ParallelMapIterator : public DatasetBaseIterator {
@@ -333,4 +334,5 @@ std::unique_ptr<IteratorBase> NewParallelMapIterator(
std::move(map_func), num_parallel_calls));
}
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/parallel_map_iterator.h b/tensorflow/core/kernels/data/parallel_map_iterator.h
index 7e6cc586f3..dc26c5cf25 100644
--- a/tensorflow/core/kernels/data/parallel_map_iterator.h
+++ b/tensorflow/core/kernels/data/parallel_map_iterator.h
@@ -20,6 +20,7 @@ limitations under the License.
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
+namespace data {
// A function that transforms elements of one dataset into another
// asynchronously. The arguments are:
@@ -47,6 +48,7 @@ std::unique_ptr<IteratorBase> NewParallelMapIterator(
const DatasetBase* input_dataset, ParallelMapIteratorFunction map_func,
int32 num_parallel_calls);
+} // namespace data
} // namespace tensorflow
#endif // TENSORFLOW_CORE_KERNELS_DATA_PARALLEL_MAP_ITERATOR_H_
diff --git a/tensorflow/core/kernels/data/parse_example_dataset_op.cc b/tensorflow/core/kernels/data/parse_example_dataset_op.cc
index 9057800d94..0cf5db017b 100644
--- a/tensorflow/core/kernels/data/parse_example_dataset_op.cc
+++ b/tensorflow/core/kernels/data/parse_example_dataset_op.cc
@@ -20,7 +20,7 @@ limitations under the License.
#include "tensorflow/core/util/example_proto_fast_parsing.h"
namespace tensorflow {
-
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level
@@ -368,5 +368,5 @@ REGISTER_KERNEL_BUILDER(Name("ParseExampleDataset").Device(DEVICE_CPU),
ParseExampleDatasetOp);
} // namespace
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/prefetch_autotuner.cc b/tensorflow/core/kernels/data/prefetch_autotuner.cc
index b3272f6bcd..533d0bd5d2 100644
--- a/tensorflow/core/kernels/data/prefetch_autotuner.cc
+++ b/tensorflow/core/kernels/data/prefetch_autotuner.cc
@@ -16,6 +16,7 @@ limitations under the License.
#include "tensorflow/core/kernels/data/prefetch_autotuner.h"
namespace tensorflow {
+namespace data {
PrefetchAutotuner::PrefetchAutotuner(int64 initial_buffer_size)
: buffer_limit_(initial_buffer_size) {
@@ -43,4 +44,5 @@ void PrefetchAutotuner::RecordConsumption(size_t current_buffer_size) {
}
}
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/prefetch_autotuner.h b/tensorflow/core/kernels/data/prefetch_autotuner.h
index fa8a184072..8693205512 100644
--- a/tensorflow/core/kernels/data/prefetch_autotuner.h
+++ b/tensorflow/core/kernels/data/prefetch_autotuner.h
@@ -19,6 +19,7 @@ limitations under the License.
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
+namespace data {
// PrefetchAutotuner dynamically adjusts the buffer size of a prefetch iterator.
//
@@ -66,6 +67,7 @@ class PrefetchAutotuner {
Mode mode_ = Mode::kDisabled;
};
+} // namespace data
} // namespace tensorflow
#endif // TENSORFLOW_CORE_KERNELS_DATA_PREFETCH_AUTOTUNER_H_
diff --git a/tensorflow/core/kernels/data/prefetch_autotuner_test.cc b/tensorflow/core/kernels/data/prefetch_autotuner_test.cc
index 29a8cc50cd..cfc324fc7e 100644
--- a/tensorflow/core/kernels/data/prefetch_autotuner_test.cc
+++ b/tensorflow/core/kernels/data/prefetch_autotuner_test.cc
@@ -18,6 +18,7 @@ limitations under the License.
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
+namespace data {
namespace {
TEST(PrefetchAutotuner, Disabled) {
@@ -79,4 +80,5 @@ TEST(PrefetchAutotuner, EnabledSteady) {
}
} // namespace
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/prefetch_dataset_op.cc b/tensorflow/core/kernels/data/prefetch_dataset_op.cc
index 50efbcbe2a..a7a2935195 100644
--- a/tensorflow/core/kernels/data/prefetch_dataset_op.cc
+++ b/tensorflow/core/kernels/data/prefetch_dataset_op.cc
@@ -21,6 +21,7 @@ limitations under the License.
#include "tensorflow/core/lib/core/error_codes.pb.h"
namespace tensorflow {
+namespace data {
// See documentation in ../ops/dataset_ops.cc for a high-level
// description of the following op.
@@ -346,6 +347,7 @@ void PrefetchDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
*output = new Dataset(ctx, input, buffer_size);
}
+namespace {
REGISTER_KERNEL_BUILDER(Name("PrefetchDataset").Device(DEVICE_CPU),
PrefetchDatasetOp);
REGISTER_KERNEL_BUILDER(Name("PrefetchDataset")
@@ -354,4 +356,7 @@ REGISTER_KERNEL_BUILDER(Name("PrefetchDataset")
.HostMemory("input_dataset")
.HostMemory("handle"),
PrefetchDatasetOp);
+} // namespace
+
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/prefetch_dataset_op.h b/tensorflow/core/kernels/data/prefetch_dataset_op.h
index c40c4b00da..588fb25a06 100644
--- a/tensorflow/core/kernels/data/prefetch_dataset_op.h
+++ b/tensorflow/core/kernels/data/prefetch_dataset_op.h
@@ -20,6 +20,7 @@ limitations under the License.
#include "tensorflow/core/kernels/data/prefetch_autotuner.h"
namespace tensorflow {
+namespace data {
class PrefetchDatasetOp : public UnaryDatasetOpKernel {
public:
@@ -34,6 +35,7 @@ class PrefetchDatasetOp : public UnaryDatasetOpKernel {
class Dataset;
};
+} // namespace data
} // namespace tensorflow
#endif // TENSORFLOW_CORE_KERNELS_DATA_PREFETCH_DATASET_OP_H_
diff --git a/tensorflow/core/kernels/data/random_dataset_op.cc b/tensorflow/core/kernels/data/random_dataset_op.cc
index 7817170e73..044a791a3f 100644
--- a/tensorflow/core/kernels/data/random_dataset_op.cc
+++ b/tensorflow/core/kernels/data/random_dataset_op.cc
@@ -21,7 +21,7 @@ limitations under the License.
#include "tensorflow/core/lib/random/random_distributions.h"
namespace tensorflow {
-
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level
@@ -151,5 +151,5 @@ REGISTER_KERNEL_BUILDER(Name("RandomDataset").Device(DEVICE_CPU),
RandomDatasetOp);
} // namespace
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/range_dataset_op.cc b/tensorflow/core/kernels/data/range_dataset_op.cc
index aa38775125..89fbaae369 100644
--- a/tensorflow/core/kernels/data/range_dataset_op.cc
+++ b/tensorflow/core/kernels/data/range_dataset_op.cc
@@ -17,7 +17,7 @@ limitations under the License.
#include "tensorflow/core/kernels/data/dataset.h"
namespace tensorflow {
-
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level
@@ -142,5 +142,5 @@ REGISTER_KERNEL_BUILDER(Name("RangeDataset").Device(DEVICE_CPU),
RangeDatasetOp);
} // namespace
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/reader_dataset_ops.cc b/tensorflow/core/kernels/data/reader_dataset_ops.cc
index 086b552936..c474cb4773 100644
--- a/tensorflow/core/kernels/data/reader_dataset_ops.cc
+++ b/tensorflow/core/kernels/data/reader_dataset_ops.cc
@@ -23,7 +23,7 @@ limitations under the License.
#include "tensorflow/core/lib/io/zlib_inputstream.h"
namespace tensorflow {
-
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level
@@ -691,5 +691,5 @@ REGISTER_KERNEL_BUILDER(Name("TFRecordDataset").Device(DEVICE_CPU),
TFRecordDatasetOp);
} // namespace
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/repeat_dataset_op.cc b/tensorflow/core/kernels/data/repeat_dataset_op.cc
index 299949b99f..94e96635ab 100644
--- a/tensorflow/core/kernels/data/repeat_dataset_op.cc
+++ b/tensorflow/core/kernels/data/repeat_dataset_op.cc
@@ -17,7 +17,7 @@ limitations under the License.
#include "tensorflow/core/kernels/data/dataset.h"
namespace tensorflow {
-
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level
@@ -250,5 +250,5 @@ REGISTER_KERNEL_BUILDER(Name("RepeatDataset").Device(DEVICE_CPU),
RepeatDatasetOp);
} // namespace
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/scan_dataset_op.cc b/tensorflow/core/kernels/data/scan_dataset_op.cc
index fccad933d0..6e515d6cc8 100644
--- a/tensorflow/core/kernels/data/scan_dataset_op.cc
+++ b/tensorflow/core/kernels/data/scan_dataset_op.cc
@@ -23,7 +23,7 @@ limitations under the License.
#include "tensorflow/core/lib/random/random.h"
namespace tensorflow {
-
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level
@@ -279,5 +279,5 @@ class ScanDatasetOp : public UnaryDatasetOpKernel {
REGISTER_KERNEL_BUILDER(Name("ScanDataset").Device(DEVICE_CPU), ScanDatasetOp);
} // namespace
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/shuffle_dataset_op.cc b/tensorflow/core/kernels/data/shuffle_dataset_op.cc
index 93a4376836..66466d6a36 100644
--- a/tensorflow/core/kernels/data/shuffle_dataset_op.cc
+++ b/tensorflow/core/kernels/data/shuffle_dataset_op.cc
@@ -25,7 +25,7 @@ limitations under the License.
#include "tensorflow/core/util/ptr_util.h"
namespace tensorflow {
-
+namespace data {
namespace {
const int64 kLogIntervalMicros = 10 * 1000000; // 10 seconds.
@@ -620,5 +620,5 @@ REGISTER_KERNEL_BUILDER(Name("ShuffleAndRepeatDataset").Device(DEVICE_CPU),
ShuffleAndRepeatDatasetOp);
} // namespace
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/single_threaded_executor.cc b/tensorflow/core/kernels/data/single_threaded_executor.cc
index e785b8b4d5..5b084a16f0 100644
--- a/tensorflow/core/kernels/data/single_threaded_executor.cc
+++ b/tensorflow/core/kernels/data/single_threaded_executor.cc
@@ -22,6 +22,7 @@ limitations under the License.
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
+namespace data {
namespace {
typedef gtl::InlinedVector<TensorValue, 4> TensorValueVec;
@@ -375,4 +376,5 @@ Status NewSingleThreadedExecutor(const LocalExecutorParams& params,
return Status::OK();
}
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/single_threaded_executor.h b/tensorflow/core/kernels/data/single_threaded_executor.h
index 15836b24c9..e934352a1d 100644
--- a/tensorflow/core/kernels/data/single_threaded_executor.h
+++ b/tensorflow/core/kernels/data/single_threaded_executor.h
@@ -19,6 +19,7 @@ limitations under the License.
#include "tensorflow/core/common_runtime/executor.h"
namespace tensorflow {
+namespace data {
// Creates a new `Executor` for executing `graph` synchronously on the caller
// thread.
@@ -55,6 +56,7 @@ Status NewSingleThreadedExecutor(const LocalExecutorParams& params,
std::unique_ptr<const Graph> graph,
Executor** executor);
+} // namespace data
} // namespace tensorflow
#endif // TENSORFLOW_CORE_KERNELS_DATA_SINGLE_THREADED_EXECUTOR_H_
diff --git a/tensorflow/core/kernels/data/single_threaded_executor_test.cc b/tensorflow/core/kernels/data/single_threaded_executor_test.cc
index f8b5769197..6244e287bb 100644
--- a/tensorflow/core/kernels/data/single_threaded_executor_test.cc
+++ b/tensorflow/core/kernels/data/single_threaded_executor_test.cc
@@ -37,6 +37,7 @@ limitations under the License.
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
+namespace data {
namespace {
class ExecutorTest : public ::testing::Test {
@@ -327,4 +328,5 @@ BENCHMARK(BM_FeedInputFetchOutput);
#endif
} // namespace
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/skip_dataset_op.cc b/tensorflow/core/kernels/data/skip_dataset_op.cc
index fe7ef38d5f..b8c7fb15f4 100644
--- a/tensorflow/core/kernels/data/skip_dataset_op.cc
+++ b/tensorflow/core/kernels/data/skip_dataset_op.cc
@@ -17,7 +17,7 @@ limitations under the License.
#include "tensorflow/core/kernels/data/dataset.h"
namespace tensorflow {
-
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level
@@ -187,5 +187,5 @@ class SkipDatasetOp : public UnaryDatasetOpKernel {
REGISTER_KERNEL_BUILDER(Name("SkipDataset").Device(DEVICE_CPU), SkipDatasetOp);
} // namespace
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/slide_dataset_op.cc b/tensorflow/core/kernels/data/slide_dataset_op.cc
index 14df3a6801..1e73cfc753 100644
--- a/tensorflow/core/kernels/data/slide_dataset_op.cc
+++ b/tensorflow/core/kernels/data/slide_dataset_op.cc
@@ -23,7 +23,7 @@ limitations under the License.
#include "tensorflow/core/util/batch_util.h"
namespace tensorflow {
-
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level
@@ -293,5 +293,5 @@ REGISTER_KERNEL_BUILDER(Name("SlideDataset").Device(DEVICE_CPU),
SlideDatasetOp);
} // namespace
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc b/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc
index e526578701..85b1e50695 100644
--- a/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc
+++ b/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc
@@ -21,7 +21,7 @@ limitations under the License.
#include "tensorflow/core/util/sparse/sparse_tensor.h"
namespace tensorflow {
-
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level
@@ -274,5 +274,5 @@ TF_CALL_DATASET_TYPES(REGISTER_DATASET_KERNEL);
#undef REGISTER_DATASET_KERNEL
} // namespace
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/sql/driver_manager.cc b/tensorflow/core/kernels/data/sql/driver_manager.cc
index ffabda1a8a..783d1e6cb2 100644
--- a/tensorflow/core/kernels/data/sql/driver_manager.cc
+++ b/tensorflow/core/kernels/data/sql/driver_manager.cc
@@ -16,7 +16,7 @@ limitations under the License.
#include "tensorflow/core/kernels/data/sql/sqlite_query_connection.h"
namespace tensorflow {
-
+namespace data {
namespace sql {
std::unique_ptr<QueryConnection> DriverManager::CreateQueryConnection(
@@ -30,5 +30,5 @@ std::unique_ptr<QueryConnection> DriverManager::CreateQueryConnection(
}
} // namespace sql
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/sql/driver_manager.h b/tensorflow/core/kernels/data/sql/driver_manager.h
index a34691b5a2..c5428f396b 100644
--- a/tensorflow/core/kernels/data/sql/driver_manager.h
+++ b/tensorflow/core/kernels/data/sql/driver_manager.h
@@ -18,7 +18,7 @@ limitations under the License.
#include "tensorflow/core/kernels/data/sql/query_connection.h"
namespace tensorflow {
-
+namespace data {
namespace sql {
// A factory class for creating `QueryConnection` instances.
@@ -35,7 +35,7 @@ class DriverManager {
};
} // namespace sql
-
+} // namespace data
} // namespace tensorflow
#endif // TENSORFLOW_CORE_KERNELS_DATA_SQL_DRIVER_MANAGER_H_
diff --git a/tensorflow/core/kernels/data/sql/query_connection.h b/tensorflow/core/kernels/data/sql/query_connection.h
index e9ffca202f..2fd229a9bf 100644
--- a/tensorflow/core/kernels/data/sql/query_connection.h
+++ b/tensorflow/core/kernels/data/sql/query_connection.h
@@ -18,6 +18,7 @@ limitations under the License.
#include "tensorflow/core/framework/tensor.h"
namespace tensorflow {
+namespace data {
class IteratorContext;
@@ -63,7 +64,7 @@ class QueryConnection {
};
} // namespace sql
-
+} // namespace data
} // namespace tensorflow
#endif // TENSORFLOW_CORE_KERNELS_DATA_SQL_QUERY_CONNECTION_H_
diff --git a/tensorflow/core/kernels/data/sql/sqlite_query_connection.cc b/tensorflow/core/kernels/data/sql/sqlite_query_connection.cc
index 7cd07bd8ec..5108e83976 100644
--- a/tensorflow/core/kernels/data/sql/sqlite_query_connection.cc
+++ b/tensorflow/core/kernels/data/sql/sqlite_query_connection.cc
@@ -19,7 +19,7 @@ limitations under the License.
#include "tensorflow/core/lib/strings/stringprintf.h"
namespace tensorflow {
-
+namespace data {
namespace sql {
SqliteQueryConnection::SqliteQueryConnection() {}
@@ -115,5 +115,5 @@ void SqliteQueryConnection::FillTensorWithResultSetEntry(
}
} // namespace sql
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/sql/sqlite_query_connection.h b/tensorflow/core/kernels/data/sql/sqlite_query_connection.h
index 81b19530b7..175492c49d 100644
--- a/tensorflow/core/kernels/data/sql/sqlite_query_connection.h
+++ b/tensorflow/core/kernels/data/sql/sqlite_query_connection.h
@@ -22,7 +22,7 @@ limitations under the License.
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
-
+namespace data {
namespace sql {
class SqliteQueryConnection : public QueryConnection {
@@ -50,7 +50,7 @@ class SqliteQueryConnection : public QueryConnection {
};
} // namespace sql
-
+} // namespace data
} // namespace tensorflow
#endif // TENSORFLOW_CORE_KERNELS_DATA_SQL_SQLITE_QUERY_CONNECTION_H_
diff --git a/tensorflow/core/kernels/data/sql_dataset_ops.cc b/tensorflow/core/kernels/data/sql_dataset_ops.cc
index 2aa153fcfa..6bbe459332 100644
--- a/tensorflow/core/kernels/data/sql_dataset_ops.cc
+++ b/tensorflow/core/kernels/data/sql_dataset_ops.cc
@@ -24,8 +24,9 @@ limitations under the License.
#include "tensorflow/core/lib/strings/stringprintf.h"
namespace tensorflow {
-
+namespace data {
namespace {
+
// See documentation in ../ops/dataset_ops.cc for a high-level
// description of the following ops.
@@ -211,5 +212,5 @@ class SqlDatasetOp : public DatasetOpKernel {
REGISTER_KERNEL_BUILDER(Name("SqlDataset").Device(DEVICE_CPU), SqlDatasetOp);
} // namespace
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/stats_aggregator_dataset_op.cc b/tensorflow/core/kernels/data/stats_aggregator_dataset_op.cc
index 75af73df54..f5314f7a75 100644
--- a/tensorflow/core/kernels/data/stats_aggregator_dataset_op.cc
+++ b/tensorflow/core/kernels/data/stats_aggregator_dataset_op.cc
@@ -19,6 +19,7 @@ limitations under the License.
#include "tensorflow/core/lib/random/random.h"
namespace tensorflow {
+namespace data {
namespace {
class SetStatsAggregatorDatasetOp : public UnaryDatasetOpKernel {
@@ -135,4 +136,5 @@ class SetStatsAggregatorDatasetOp : public UnaryDatasetOpKernel {
REGISTER_KERNEL_BUILDER(Name("SetStatsAggregatorDataset").Device(DEVICE_CPU),
SetStatsAggregatorDatasetOp);
} // namespace
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/stats_aggregator_ops.cc b/tensorflow/core/kernels/data/stats_aggregator_ops.cc
index b133cfab54..a7ded67876 100644
--- a/tensorflow/core/kernels/data/stats_aggregator_ops.cc
+++ b/tensorflow/core/kernels/data/stats_aggregator_ops.cc
@@ -26,6 +26,7 @@ limitations under the License.
#include "tensorflow/core/platform/macros.h"
namespace tensorflow {
+namespace data {
namespace {
static mutex* get_counters_map_lock() {
@@ -145,4 +146,5 @@ REGISTER_KERNEL_BUILDER(Name("StatsAggregatorSummary").Device(DEVICE_CPU),
StatsAggregatorSummaryOp);
} // namespace
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/stats_dataset_ops.cc b/tensorflow/core/kernels/data/stats_dataset_ops.cc
index 8957f5d997..e9e42f05a1 100644
--- a/tensorflow/core/kernels/data/stats_dataset_ops.cc
+++ b/tensorflow/core/kernels/data/stats_dataset_ops.cc
@@ -22,6 +22,7 @@ limitations under the License.
#include "tensorflow/core/lib/random/random.h"
namespace tensorflow {
+namespace data {
namespace {
// This op defines a `Dataset` that passes through its input elements and
@@ -248,4 +249,5 @@ REGISTER_KERNEL_BUILDER(Name("BytesProducedStatsDataset").Device(DEVICE_CPU),
BytesProducedStatsDatasetOp);
} // namespace
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/take_dataset_op.cc b/tensorflow/core/kernels/data/take_dataset_op.cc
index e5c237dfaa..e5cdfdd732 100644
--- a/tensorflow/core/kernels/data/take_dataset_op.cc
+++ b/tensorflow/core/kernels/data/take_dataset_op.cc
@@ -17,7 +17,7 @@ limitations under the License.
#include "tensorflow/core/kernels/data/dataset.h"
namespace tensorflow {
-
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level
@@ -174,5 +174,5 @@ class TakeDatasetOp : public UnaryDatasetOpKernel {
REGISTER_KERNEL_BUILDER(Name("TakeDataset").Device(DEVICE_CPU), TakeDatasetOp);
} // namespace
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/tensor_dataset_op.cc b/tensorflow/core/kernels/data/tensor_dataset_op.cc
index 1192fafc4c..e1cefd23d8 100644
--- a/tensorflow/core/kernels/data/tensor_dataset_op.cc
+++ b/tensorflow/core/kernels/data/tensor_dataset_op.cc
@@ -18,7 +18,7 @@ limitations under the License.
#include "tensorflow/core/kernels/data/dataset.h"
namespace tensorflow {
-
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level
@@ -140,5 +140,5 @@ REGISTER_KERNEL_BUILDER(Name("TensorDataset").Device(DEVICE_CPU),
TensorDatasetOp);
} // namespace
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/tensor_queue_dataset_op.cc b/tensorflow/core/kernels/data/tensor_queue_dataset_op.cc
index ccd5e60acc..2ed636a400 100644
--- a/tensorflow/core/kernels/data/tensor_queue_dataset_op.cc
+++ b/tensorflow/core/kernels/data/tensor_queue_dataset_op.cc
@@ -24,7 +24,7 @@ limitations under the License.
#include "tensorflow/core/util/batch_util.h"
namespace tensorflow {
-
+namespace data {
namespace {
bool IsGreaterEqualToOrCompatibleWith(const PartialTensorShape& a,
@@ -648,5 +648,5 @@ REGISTER_KERNEL_BUILDER(Name("EnqueueInQueueDataset").Device(DEVICE_CPU),
EnqueueInQueueDatasetOp);
} // namespace
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/tensor_slice_dataset_op.cc b/tensorflow/core/kernels/data/tensor_slice_dataset_op.cc
index dc32cd23e5..7dc64b0a75 100644
--- a/tensorflow/core/kernels/data/tensor_slice_dataset_op.cc
+++ b/tensorflow/core/kernels/data/tensor_slice_dataset_op.cc
@@ -19,7 +19,7 @@ limitations under the License.
#include "tensorflow/core/util/batch_util.h"
namespace tensorflow {
-
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level
@@ -168,5 +168,5 @@ REGISTER_KERNEL_BUILDER(Name("TensorSliceDataset").Device(DEVICE_CPU),
TensorSliceDatasetOp);
} // namespace
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/unbatch_dataset_op.cc b/tensorflow/core/kernels/data/unbatch_dataset_op.cc
index 1a79f72b28..81c432b938 100644
--- a/tensorflow/core/kernels/data/unbatch_dataset_op.cc
+++ b/tensorflow/core/kernels/data/unbatch_dataset_op.cc
@@ -18,7 +18,7 @@ limitations under the License.
#include "tensorflow/core/util/batch_util.h"
namespace tensorflow {
-
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level
@@ -204,5 +204,5 @@ REGISTER_KERNEL_BUILDER(Name("UnbatchDataset").Device(DEVICE_CPU),
UnbatchDatasetOp);
} // namespace
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/window_dataset.cc b/tensorflow/core/kernels/data/window_dataset.cc
index 0ab6beabfc..2ad4711aab 100644
--- a/tensorflow/core/kernels/data/window_dataset.cc
+++ b/tensorflow/core/kernels/data/window_dataset.cc
@@ -16,6 +16,7 @@ limitations under the License.
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
+namespace data {
namespace {
class WindowDataset : public DatasetBase {
@@ -107,4 +108,5 @@ Status NewWindowDataset(std::vector<std::vector<Tensor>> elements,
return Status::OK();
}
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/window_dataset.h b/tensorflow/core/kernels/data/window_dataset.h
index 7bd31a0bc7..84cb3c7860 100644
--- a/tensorflow/core/kernels/data/window_dataset.h
+++ b/tensorflow/core/kernels/data/window_dataset.h
@@ -23,6 +23,7 @@ limitations under the License.
#include "tensorflow/core/kernels/data/dataset.h"
namespace tensorflow {
+namespace data {
// Creates a dataset representing an eagerly-collected window of elements.
//
@@ -43,6 +44,7 @@ Status NewWindowDataset(std::vector<std::vector<Tensor>> elements,
std::vector<PartialTensorShape> output_shapes,
DatasetBase** out_dataset);
+} // namespace data
} // namespace tensorflow
#endif // TENSORFLOW_CORE_KERNELS_DATA_WINDOW_DATASET_H_
diff --git a/tensorflow/core/kernels/data/window_dataset_op.cc b/tensorflow/core/kernels/data/window_dataset_op.cc
index 41bf9d43fe..3975086841 100644
--- a/tensorflow/core/kernels/data/window_dataset_op.cc
+++ b/tensorflow/core/kernels/data/window_dataset_op.cc
@@ -19,7 +19,7 @@ limitations under the License.
#include "tensorflow/core/kernels/data/window_dataset.h"
namespace tensorflow {
-
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level
@@ -195,5 +195,5 @@ REGISTER_KERNEL_BUILDER(Name("WindowDataset").Device(DEVICE_CPU),
WindowDatasetOp);
} // namespace
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/writer_ops.cc b/tensorflow/core/kernels/data/writer_ops.cc
index 1c49874a6a..3f76695bb1 100644
--- a/tensorflow/core/kernels/data/writer_ops.cc
+++ b/tensorflow/core/kernels/data/writer_ops.cc
@@ -22,7 +22,7 @@ limitations under the License.
#include "tensorflow/core/platform/file_system.h"
namespace tensorflow {
-
+namespace data {
namespace {
class ToTFRecordOp : public AsyncOpKernel {
@@ -104,4 +104,5 @@ REGISTER_KERNEL_BUILDER(Name("DatasetToTFRecord").Device(DEVICE_CPU),
ToTFRecordOp);
} // namespace
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/data/zip_dataset_op.cc b/tensorflow/core/kernels/data/zip_dataset_op.cc
index e4306579ed..61a2078f46 100644
--- a/tensorflow/core/kernels/data/zip_dataset_op.cc
+++ b/tensorflow/core/kernels/data/zip_dataset_op.cc
@@ -17,7 +17,7 @@ limitations under the License.
#include "tensorflow/core/kernels/data/dataset.h"
namespace tensorflow {
-
+namespace data {
namespace {
// See documentation in ../ops/dataset_ops.cc for a high-level
@@ -175,5 +175,5 @@ class ZipDatasetOp : public DatasetOpKernel {
REGISTER_KERNEL_BUILDER(Name("ZipDataset").Device(DEVICE_CPU), ZipDatasetOp);
} // namespace
-
+} // namespace data
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/eigen_backward_cuboid_convolutions.h b/tensorflow/core/kernels/eigen_backward_cuboid_convolutions.h
index 3ebeb7be2b..27918b410b 100644
--- a/tensorflow/core/kernels/eigen_backward_cuboid_convolutions.h
+++ b/tensorflow/core/kernels/eigen_backward_cuboid_convolutions.h
@@ -51,14 +51,18 @@ EIGEN_ALWAYS_INLINE static const typename internal::conditional<
internal::traits<OutputBackward>::NumDimensions>,
const TensorContractionOp<
const array<
- IndexPair<typename internal::traits<OutputBackward>::Index>, 2>,
- const TensorReshapingOp<
+ IndexPair<typename internal::traits<OutputBackward>::Index>, 1>,
+ const Eigen::TensorForcedEvalOp<const TensorReshapingOp<
const DSizes<typename internal::traits<OutputBackward>::Index,
- 3>,
- const TensorReverseOp<const array<bool, 5>, const Kernel> >,
+ 2>,
+ const TensorShufflingOp<
+ const array<
+ typename internal::traits<OutputBackward>::Index, 5>,
+ const TensorReverseOp<const Eigen::array<bool, 5>,
+ const Kernel> > > >,
const TensorReshapingOp<
const DSizes<typename internal::traits<OutputBackward>::Index,
- 3>,
+ 2>,
const TensorVolumePatchOp<Dynamic, Dynamic, Dynamic,
const OutputBackward> > > >,
TensorReshapingOp<
@@ -66,24 +70,27 @@ EIGEN_ALWAYS_INLINE static const typename internal::conditional<
internal::traits<OutputBackward>::NumDimensions>,
const TensorContractionOp<
const array<
- IndexPair<typename internal::traits<OutputBackward>::Index>, 2>,
+ IndexPair<typename internal::traits<OutputBackward>::Index>, 1>,
const TensorReshapingOp<
const DSizes<typename internal::traits<OutputBackward>::Index,
- 3>,
+ 2>,
const TensorVolumePatchOp<Dynamic, Dynamic, Dynamic,
const OutputBackward> >,
- const TensorReshapingOp<
+ const Eigen::TensorForcedEvalOp<const TensorReshapingOp<
const DSizes<typename internal::traits<OutputBackward>::Index,
- 3>,
- const TensorReverseOp<const array<bool, 5>,
- const Kernel> > > > >::type
+ 2>,
+ const TensorShufflingOp<
+ const array<
+ typename internal::traits<OutputBackward>::Index, 5>,
+ const TensorReverseOp<const Eigen::array<bool, 5>,
+ const Kernel> > > > > > >::type
CuboidConvolutionBackwardInput(
const Kernel& kernel, const OutputBackward& output_backward,
typename internal::traits<OutputBackward>::Index inputPlanes,
typename internal::traits<OutputBackward>::Index inputRows,
typename internal::traits<OutputBackward>::Index inputCols,
- const DenseIndex stridePlanes = 1, const DenseIndex strideRows = 1,
- const DenseIndex strideCols = 1) {
+ const DenseIndex plane_stride = 1, const DenseIndex row_stride = 1,
+ const DenseIndex col_stride = 1) {
typedef typename internal::traits<OutputBackward>::Index TensorIndex;
const TensorRef<const Tensor<typename internal::traits<Kernel>::Scalar,
internal::traits<Kernel>::NumDimensions,
@@ -125,58 +132,45 @@ CuboidConvolutionBackwardInput(
const TensorIndex outputCols =
isColMajor ? out.dimensions()[3] : out.dimensions()[NumDims - 4];
- TensorIndex forward_pad_z, forward_pad_y, forward_pad_x;
- const TensorIndex size_z =
- Eigen::divup(inputPlanes, static_cast<TensorIndex>(stridePlanes));
- const TensorIndex size_y =
- Eigen::divup(inputRows, static_cast<TensorIndex>(strideRows));
- const TensorIndex size_x =
- Eigen::divup(inputCols, static_cast<TensorIndex>(strideCols));
-
- // Infer padding type.
- if (size_z == outputPlanes && size_y == outputRows && size_x == outputCols) {
- // SAME padding.
- const TensorIndex dz = numext::maxi<TensorIndex>(
- 0, (size_z - 1) * stridePlanes + kernelPlanes - inputPlanes);
- const TensorIndex dy = numext::maxi<TensorIndex>(
- 0, (size_y - 1) * strideRows + kernelRows - inputRows);
- const TensorIndex dx = numext::maxi<TensorIndex>(
- 0, (size_x - 1) * strideCols + kernelCols - inputCols);
-
- forward_pad_z = dz / 2;
- forward_pad_y = dy / 2;
- forward_pad_x = dx / 2;
- } else {
- // VALID padding.
- forward_pad_z = 0;
- forward_pad_y = 0;
- forward_pad_x = 0;
- }
- const TensorIndex padding_ztop = kernelPlanes - 1 - forward_pad_z;
- const TensorIndex padding_top = kernelRows - 1 - forward_pad_y;
- const TensorIndex padding_left = kernelCols - 1 - forward_pad_x;
-
- const TensorIndex padding_zbottom = inputPlanes + kernelPlanes - 1 -
- (outputPlanes - 1) * stridePlanes - 1 -
- padding_ztop;
- const TensorIndex padding_bottom = inputRows + kernelRows - 1 -
- (outputRows - 1) * strideRows - 1 -
- padding_top;
- const TensorIndex padding_right = inputCols + kernelCols - 1 -
- (outputCols - 1) * strideCols - 1 -
- padding_left;
-
- eigen_assert(padding_ztop >= 0);
- eigen_assert(padding_zbottom >= 0);
+ // TODO(ezhulenev): Add support for inflated strides. Without inflated strides
+ // effective kernel planes/rows/cols are always the same as the kernel itself
+ // (see eigen_spatial_convolutions for details).
+ const TensorIndex kernelPlanesEff = kernelPlanes;
+ const TensorIndex kernelRowsEff = kernelRows;
+ const TensorIndex kernelColsEff = kernelCols;
+
+ // Computing the forward padding.
+ const TensorIndex forward_pad_top_z = numext::maxi<Index>(
+ 0,
+ ((outputPlanes - 1) * plane_stride + kernelPlanesEff - inputPlanes) / 2);
+ const TensorIndex forward_pad_top = numext::maxi<Index>(
+ 0, ((outputRows - 1) * row_stride + kernelRowsEff - inputRows) / 2);
+ const TensorIndex forward_pad_left = numext::maxi<Index>(
+ 0, ((outputCols - 1) * col_stride + kernelColsEff - inputCols) / 2);
+
+ const TensorIndex padding_top_z = kernelPlanesEff - 1 - forward_pad_top_z;
+ const TensorIndex padding_top = kernelRowsEff - 1 - forward_pad_top;
+ const TensorIndex padding_left = kernelColsEff - 1 - forward_pad_left;
+
+ const TensorIndex padding_bottom_z = inputPlanes -
+ (outputPlanes - 1) * plane_stride - 2 -
+ padding_top_z + kernelPlanesEff;
+ const TensorIndex padding_bottom = inputRows - (outputRows - 1) * row_stride -
+ 2 - padding_top + kernelRowsEff;
+ const TensorIndex padding_right = inputCols - (outputCols - 1) * col_stride -
+ 2 - padding_left + kernelColsEff;
+
+ eigen_assert(padding_top_z >= 0);
eigen_assert(padding_top >= 0);
eigen_assert(padding_left >= 0);
+ eigen_assert(padding_bottom_z >= 0);
eigen_assert(padding_bottom >= 0);
eigen_assert(padding_right >= 0);
- // The kernel has dimensions filters X channels X patch_planes X patch_rows X
- // patch_cols.
+ // The kernel has dimensions :
+ // filters x channels x patch_planes x patch_rows x patch_cols.
// We need to reverse the kernel along the spatial dimensions.
- array<bool, 5> kernel_reverse;
+ Eigen::array<bool, 5> kernel_reverse;
if (isColMajor) {
kernel_reverse[0] = false;
kernel_reverse[1] = false;
@@ -191,15 +185,35 @@ CuboidConvolutionBackwardInput(
kernel_reverse[4] = false;
}
- DSizes<TensorIndex, 3> kernel_dims;
+ // Reorder the dimensions to:
+ // filters x patch_planes x patch_rows x patch_cols x channels
+ array<TensorIndex, 5> kernel_shuffle;
if (isColMajor) {
- kernel_dims[0] = kernelFilters;
- kernel_dims[1] = kernelChannels;
- kernel_dims[2] = kernelRows * kernelCols * kernelPlanes;
+ // From: filters x channels x planes x rows x cols
+ // To: filters x planes x rows x cols x channels
+ kernel_shuffle[0] = 0;
+ kernel_shuffle[1] = 2;
+ kernel_shuffle[2] = 3;
+ kernel_shuffle[3] = 4;
+ kernel_shuffle[4] = 1;
} else {
- kernel_dims[0] = kernelRows * kernelCols * kernelPlanes;
+ // From: cols x rows x planes x channels x filters
+ // To: channels x cols x rows x planes x filters
+ kernel_shuffle[0] = 3;
+ kernel_shuffle[1] = 0;
+ kernel_shuffle[2] = 1;
+ kernel_shuffle[3] = 2;
+ kernel_shuffle[4] = 4;
+ }
+
+ // Collapse the dims
+ DSizes<TensorIndex, 2> kernel_dims;
+ if (isColMajor) {
+ kernel_dims[0] = kernelFilters * kernelPlanes * kernelRows * kernelCols;
kernel_dims[1] = kernelChannels;
- kernel_dims[2] = kernelFilters;
+ } else {
+ kernel_dims[1] = kernelFilters * kernelPlanes * kernelRows * kernelCols;
+ kernel_dims[0] = kernelChannels;
}
// The output_backward has dimensions out_depth X out_planes X out_rows X
@@ -208,36 +222,32 @@ CuboidConvolutionBackwardInput(
// dimensions:
// out_depth X (patch_planes * patch_rows * patch_cols) X (input_planes *
// input_rows * input_cols * OTHERS)
- DSizes<TensorIndex, 3> pre_contract_dims;
+ DSizes<TensorIndex, 2> pre_contract_dims;
if (isColMajor) {
- pre_contract_dims[0] = kernelFilters;
- pre_contract_dims[1] = kernelRows * kernelCols * kernelPlanes;
- pre_contract_dims[2] = inputRows * inputCols * inputPlanes;
+ pre_contract_dims[0] =
+ kernelFilters * kernelPlanes * kernelRows * kernelCols;
+ pre_contract_dims[1] = inputPlanes * inputRows * inputCols;
for (int i = 4; i < NumDims; ++i) {
- pre_contract_dims[2] *= out.dimension(i);
+ pre_contract_dims[1] *= out.dimension(i);
}
} else {
- pre_contract_dims[2] = kernelFilters;
- pre_contract_dims[1] = kernelRows * kernelCols * kernelPlanes;
- pre_contract_dims[0] = inputRows * inputCols * inputPlanes;
+ pre_contract_dims[1] =
+ kernelFilters * kernelPlanes * kernelRows * kernelCols;
+ pre_contract_dims[0] = inputPlanes * inputRows * inputCols;
for (int i = 0; i < NumDims - 4; ++i) {
pre_contract_dims[0] *= out.dimension(i);
}
}
- // We will contract along dimensions (0, 2) in kernel and (0, 1) in
- // output_backward, if this is col-major, and
- // dimensions (0, 2) in kernel and (1, 2) in output_backward, if this
- // row-major.
- array<IndexPair<TensorIndex>, 2> contract_dims;
+ // We will contract along the fused dimension that contains the kernelFilters,
+ // kernelPlanes, kernelRows and kernelCols.
+ array<IndexPair<TensorIndex>, 1> contract_dims;
if (isColMajor) {
// col-major: kernel.contract(output.patches)
contract_dims[0] = IndexPair<TensorIndex>(0, 0);
- contract_dims[1] = IndexPair<TensorIndex>(2, 1);
} else {
// row-major: output.patches.contract(kernel)
- contract_dims[0] = IndexPair<TensorIndex>(1, 0);
- contract_dims[1] = IndexPair<TensorIndex>(2, 2);
+ contract_dims[0] = IndexPair<TensorIndex>(1, 1);
}
// Post contraction, the dimensions of the input_backprop is
@@ -261,40 +271,31 @@ CuboidConvolutionBackwardInput(
}
}
- DSizes<TensorIndex, NumDims> strides;
- for (int i = 0; i < NumDims; i++) {
- strides[i] = 1;
- }
- if (isColMajor) {
- strides[1] = stridePlanes;
- strides[2] = strideRows;
- strides[3] = strideCols;
- } else {
- strides[NumDims - 2] = stridePlanes;
- strides[NumDims - 3] = strideRows;
- strides[NumDims - 4] = strideCols;
- }
-
return choose(
Cond<internal::traits<OutputBackward>::Layout == ColMajor>(),
kernel.reverse(kernel_reverse)
+ .shuffle(kernel_shuffle)
.reshape(kernel_dims)
+ .eval()
.contract(output_backward
.extract_volume_patches(
kernelPlanes, kernelRows, kernelCols, 1, 1, 1,
- stridePlanes, strideRows, strideCols, padding_ztop,
- padding_zbottom, padding_top, padding_bottom,
+ plane_stride, row_stride, col_stride, padding_top_z,
+ padding_bottom_z, padding_top, padding_bottom,
padding_left, padding_right)
.reshape(pre_contract_dims),
contract_dims)
.reshape(post_contract_dims),
output_backward
.extract_volume_patches(kernelPlanes, kernelRows, kernelCols, 1, 1, 1,
- stridePlanes, strideRows, strideCols,
- padding_ztop, padding_zbottom, padding_top,
+ plane_stride, row_stride, col_stride,
+ padding_top_z, padding_bottom_z, padding_top,
padding_bottom, padding_left, padding_right)
.reshape(pre_contract_dims)
- .contract(kernel.reverse(kernel_reverse).reshape(kernel_dims),
+ .contract(kernel.reverse(kernel_reverse)
+ .shuffle(kernel_shuffle)
+ .reshape(kernel_dims)
+ .eval(),
contract_dims)
.reshape(post_contract_dims));
}
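The padding arithmetic in the hunk above is easiest to check numerically: the backward-input pass inflates the output gradient by the forward stride, pads it, and slides the (flipped) kernel at stride 1, so the number of patch positions must equal the input size. A small standalone Python check of that identity follows; the helper name and the example shapes are illustrative only, not part of the patch.

    def backward_input_padding(in_size, k, stride, out_size):
        # Mirrors forward_pad_top / padding_top / padding_bottom from the hunk above.
        forward_pad = max(0, ((out_size - 1) * stride + k - in_size) // 2)
        pad_top = k - 1 - forward_pad
        pad_bottom = in_size - (out_size - 1) * stride - 2 - pad_top + k
        return pad_top, pad_bottom

    for in_size, k, stride, out_size in [(5, 3, 2, 3),   # SAME forward conv
                                         (7, 5, 2, 2)]:  # VALID forward conv
        pad_top, pad_bottom = backward_input_padding(in_size, k, stride, out_size)
        assert pad_top >= 0 and pad_bottom >= 0
        inflated = (out_size - 1) * stride + 1               # gradient inflated by the stride
        positions = inflated + pad_top + pad_bottom - k + 1  # stride-1 patch positions
        assert positions == in_size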
diff --git a/tensorflow/core/kernels/eigen_backward_spatial_convolutions.h b/tensorflow/core/kernels/eigen_backward_spatial_convolutions.h
index cb0a76dac4..8d06107553 100644
--- a/tensorflow/core/kernels/eigen_backward_spatial_convolutions.h
+++ b/tensorflow/core/kernels/eigen_backward_spatial_convolutions.h
@@ -189,14 +189,19 @@ SpatialConvolutionBackwardInput(
}
#endif
- // Reorder the dimensions to filters X patch_rows X patch_cols X channels
+ // Reorder the dimensions to:
+ // filters x patch_rows x patch_cols x channels
array<TensorIndex, 4> kernel_shuffle;
if (isColMajor) {
+ // From: filters x channels x rows x cols
+ // To: filters x rows x cols x channels
kernel_shuffle[0] = 0;
kernel_shuffle[1] = 2;
kernel_shuffle[2] = 3;
kernel_shuffle[3] = 1;
} else {
+ // From: cols x rows x channels x filters
+ // To: channels x cols x rows x filters
kernel_shuffle[0] = 2;
kernel_shuffle[1] = 0;
kernel_shuffle[2] = 1;
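The shuffle arrays above follow Eigen's TensorShufflingOp convention: entry i names the source dimension that lands at position i, which is the same convention numpy.transpose uses. Ignoring Eigen's col-major storage details, the col-major permutation behaves like this (shapes invented for the example):

    import numpy as np

    kernel = np.zeros((8, 3, 5, 5))   # filters x channels x rows x cols
    shuffle = (0, 2, 3, 1)            # same permutation as kernel_shuffle above
    # filters x rows x cols x channels after the shuffle
    assert np.transpose(kernel, shuffle).shape == (8, 5, 5, 3)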
diff --git a/tensorflow/core/kernels/eigen_benchmark_cpu_test.cc b/tensorflow/core/kernels/eigen_benchmark_cpu_test.cc
index 7c2bbb8148..3b34f650b6 100644
--- a/tensorflow/core/kernels/eigen_benchmark_cpu_test.cc
+++ b/tensorflow/core/kernels/eigen_benchmark_cpu_test.cc
@@ -403,9 +403,15 @@ BM_CuboidConvolutions(8, // batch size
16, 5, 5, 5, // filter: count, height, width, panes
"conv3d_depth4");
BM_CuboidConvolutions(8, 25, 25, 25, 8, 16, 5, 5, 5, "conv3d_depth8");
+BM_CuboidConvolutions(2, 9, 31, 31, 64, 64, 5, 5, 5, "b2_conv3d_1");
+BM_CuboidConvolutions(2, 5, 27, 27, 64, 64, 5, 5, 5, "b2_conv3d_2");
BM_CuboidConvolutionsBwdInput(8, 25, 25, 25, 4, 16, 5, 5, 5, "conv3d_depth4");
BM_CuboidConvolutionsBwdInput(8, 25, 25, 25, 8, 16, 5, 5, 5, "conv3d_depth8");
+BM_CuboidConvolutionsBwdInput(2, 9, 31, 31, 64, 64, 5, 5, 5, "b2_conv3d_1");
+BM_CuboidConvolutionsBwdInput(2, 5, 27, 27, 64, 64, 5, 5, 5, "b2_conv3d_2");
BM_CuboidConvolutionsBwdKernel(8, 25, 25, 25, 4, 16, 5, 5, 5, "conv3d_depth4");
BM_CuboidConvolutionsBwdKernel(8, 25, 25, 25, 8, 16, 5, 5, 5, "conv3d_depth8");
+BM_CuboidConvolutionsBwdKernel(2, 9, 31, 31, 64, 64, 5, 5, 5, "b2_conv3d_1");
+BM_CuboidConvolutionsBwdKernel(2, 5, 27, 27, 64, 64, 5, 5, 5, "b2_conv3d_2");
diff --git a/tensorflow/core/kernels/gather_nd_op_cpu_impl.h b/tensorflow/core/kernels/gather_nd_op_cpu_impl.h
index 66ae7f0894..277ee2be02 100644
--- a/tensorflow/core/kernels/gather_nd_op_cpu_impl.h
+++ b/tensorflow/core/kernels/gather_nd_op_cpu_impl.h
@@ -123,10 +123,10 @@ struct GatherNdSlice<CPUDevice, T, Index, IXDIM> {
// is considerably more efficient.
#pragma omp parallel for
for (Eigen::DenseIndex i = 0; i < batch_size; i++) {
- const Eigen::array<Eigen::DenseIndex, 1> loc = i;
+ const Eigen::array<Eigen::DenseIndex, 1> loc{i};
gather_nd_generator(loc);
}
-#else
+#else // INTEL_MKL
Tscratch.device(d) = Tscratch.reshape(reshape_dims)
.broadcast(broadcast_dims)
.generate(gather_nd_generator)
diff --git a/tensorflow/core/util/ctc/ctc_beam_entry.h b/tensorflow/core/util/ctc/ctc_beam_entry.h
index 973e315f09..24002e72a0 100644
--- a/tensorflow/core/util/ctc/ctc_beam_entry.h
+++ b/tensorflow/core/util/ctc/ctc_beam_entry.h
@@ -1,4 +1,3 @@
-// LINT.IfChange
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
@@ -13,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
+// LINT.IfChange
#ifndef TENSORFLOW_CORE_UTIL_CTC_CTC_BEAM_ENTRY_H_
#define TENSORFLOW_CORE_UTIL_CTC_CTC_BEAM_ENTRY_H_
diff --git a/tensorflow/core/util/ctc/ctc_beam_scorer.h b/tensorflow/core/util/ctc/ctc_beam_scorer.h
index 1a622babe1..1e45a8abd3 100644
--- a/tensorflow/core/util/ctc/ctc_beam_scorer.h
+++ b/tensorflow/core/util/ctc/ctc_beam_scorer.h
@@ -1,4 +1,3 @@
-// LINT.IfChange
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
@@ -13,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
+// LINT.IfChange
// Collection of scoring classes that can be extended and provided to the
// CTCBeamSearchDecoder to incorporate additional scoring logic (such as a
diff --git a/tensorflow/core/util/ctc/ctc_beam_search.h b/tensorflow/core/util/ctc/ctc_beam_search.h
index 5e2aeb7830..6fbb1ed0da 100644
--- a/tensorflow/core/util/ctc/ctc_beam_search.h
+++ b/tensorflow/core/util/ctc/ctc_beam_search.h
@@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
+// LINT.IfChange
#ifndef TENSORFLOW_CORE_UTIL_CTC_CTC_BEAM_SEARCH_H_
#define TENSORFLOW_CORE_UTIL_CTC_CTC_BEAM_SEARCH_H_
diff --git a/tensorflow/core/util/ctc/ctc_decoder.h b/tensorflow/core/util/ctc/ctc_decoder.h
index 3be36822e5..b55d7d77ac 100644
--- a/tensorflow/core/util/ctc/ctc_decoder.h
+++ b/tensorflow/core/util/ctc/ctc_decoder.h
@@ -1,4 +1,3 @@
-// LINT.IfChange
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
@@ -13,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
+// LINT.IfChange
#ifndef TENSORFLOW_CORE_UTIL_CTC_CTC_DECODER_H_
#define TENSORFLOW_CORE_UTIL_CTC_CTC_DECODER_H_
diff --git a/tensorflow/core/util/ctc/ctc_loss_util.h b/tensorflow/core/util/ctc/ctc_loss_util.h
index 36be9e92ef..054412d388 100644
--- a/tensorflow/core/util/ctc/ctc_loss_util.h
+++ b/tensorflow/core/util/ctc/ctc_loss_util.h
@@ -1,4 +1,3 @@
-// LINT.IfChange
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
@@ -13,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
+// LINT.IfChange
#ifndef TENSORFLOW_CORE_UTIL_CTC_CTC_LOSS_UTIL_H_
#define TENSORFLOW_CORE_UTIL_CTC_CTC_LOSS_UTIL_H_
diff --git a/tensorflow/python/BUILD b/tensorflow/python/BUILD
index 5af6437c56..ba9c6a2320 100644
--- a/tensorflow/python/BUILD
+++ b/tensorflow/python/BUILD
@@ -2090,6 +2090,18 @@ py_library(
srcs = [
"ops/custom_gradient.py",
"ops/gradients.py",
+ ],
+ srcs_version = "PY2AND3",
+ deps = [
+ ":gradients_impl",
+ "//tensorflow/python/eager:function",
+ "//tensorflow/python/eager:tape",
+ ],
+)
+
+py_library(
+ name = "gradients_impl",
+ srcs = [
"ops/gradients_impl.py",
],
srcs_version = "PY2AND3",
@@ -4381,6 +4393,7 @@ cuda_py_tests(
"training/ftrl_test.py",
"training/gradient_descent_test.py",
"training/learning_rate_decay_test.py",
+ "training/learning_rate_decay_v2_test.py",
"training/momentum_test.py",
"training/optimizer_test.py",
"training/proximal_adagrad_test.py",
diff --git a/tensorflow/python/__init__.py b/tensorflow/python/__init__.py
index a2ab63bb48..4921ecc43c 100644
--- a/tensorflow/python/__init__.py
+++ b/tensorflow/python/__init__.py
@@ -48,6 +48,13 @@ import numpy as np
from tensorflow.python import pywrap_tensorflow
+from tensorflow.python.tools import component_api_helper
+component_api_helper.package_hook(
+ parent_package_str='tensorflow.python',
+ child_package_str=(
+ 'tensorflow_estimator.python.estimator'))
+del component_api_helper
+
# Protocol buffers
from tensorflow.core.framework.graph_pb2 import *
from tensorflow.core.framework.node_def_pb2 import *
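The hunk above only shows the call site of component_api_helper.package_hook; the diff does not include its implementation. As a rough, assumed sketch of what such a hook generally does (import an optional child package and graft it onto the parent namespace, skipping quietly when it is not installed), not the actual TensorFlow helper:

    import importlib

    def package_hook(parent_package_str, child_package_str):
        # Assumed sketch only: the real component_api_helper may differ.
        try:
            child = importlib.import_module(child_package_str)
        except ImportError:
            return  # optional component not installed
        parent = importlib.import_module(parent_package_str)
        setattr(parent, child_package_str.rsplit('.', 1)[-1], child)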
diff --git a/tensorflow/python/client/session_test.py b/tensorflow/python/client/session_test.py
index 052be68385..f87a96e547 100644
--- a/tensorflow/python/client/session_test.py
+++ b/tensorflow/python/client/session_test.py
@@ -49,6 +49,8 @@ from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gen_control_flow_ops
+# Import gradients to resolve circular imports
+from tensorflow.python.ops import gradients # pylint: disable=unused-import
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
diff --git a/tensorflow/python/eager/BUILD b/tensorflow/python/eager/BUILD
index 6f48d38b58..85da1baaf0 100644
--- a/tensorflow/python/eager/BUILD
+++ b/tensorflow/python/eager/BUILD
@@ -241,7 +241,7 @@ py_library(
"//tensorflow/python:dtypes",
"//tensorflow/python:errors",
"//tensorflow/python:framework_ops",
- "//tensorflow/python:gradients",
+ "//tensorflow/python:gradients_impl",
"//tensorflow/python:graph_to_function_def",
"//tensorflow/python:util",
"//tensorflow/python/eager:context",
diff --git a/tensorflow/python/eager/function.py b/tensorflow/python/eager/function.py
index 6c87dccaf1..d56c1457e0 100644
--- a/tensorflow/python/eager/function.py
+++ b/tensorflow/python/eager/function.py
@@ -55,8 +55,11 @@ from tensorflow.python.util import tf_inspect
# (function -> gradients_impl -> control_flow_ops -> cond_v2_impl).
cond_v2_impl._function = sys.modules[__name__] # pylint: disable=protected-access
+# This is to avoid a circular dependency with gradients_impl
+gradients_impl._function = sys.modules[__name__] # pylint: disable=protected-access
-def create_substitute_placeholder(value, name, dtype=None):
+
+def _create_substitute_placeholder(value, name, dtype=None):
"""Creates a placeholder for `value` and propagates shape info to it."""
# Note: setting ops.control_dependencies(None) ensures we always put
# capturing placeholders outside of any control flow context.
@@ -88,100 +91,6 @@ def create_substitute_placeholder(value, name, dtype=None):
return placeholder
-def capture_value(tensor_map, value, dtype, name):
- """Capture a value from outside the function, to pass in as an extra arg."""
- captured_value = tensor_map.get(value, None)
- if captured_value is None:
- captured_value = create_substitute_placeholder(value, name=name,
- dtype=dtype)
- tensor_map[value] = captured_value
- tape.record_operation("captured_value", [captured_value], [value],
- lambda x: [x])
- return captured_value
-
-
-class CapturingGraph(ops.Graph):
- """Graph that can capture tensors from other graphs.
-
- Attributes:
- captures: Maps external tensor -> internal tensor (e.g. input placeholder).
- The entries are in the order they were captured.
- """
-
- def __init__(self):
- super(CapturingGraph, self).__init__()
-
- self.captures = collections.OrderedDict()
- self._building_function = True
-
- # Map from resource tensor name to last op (in program order) which uses
- # this tensor. Used to enforce that execution order matches program order
- # for resource tensors.
- self._last_op_using_resource_tensor = {}
-
- def clear_resource_control_flow_state(self):
- self._last_op_using_resource_tensor = {}
-
- # TODO(skyewm): get rid of name and use the name of `tensor`.
- def capture(self, tensor, name=None):
- """Capture `tensor` if it's external to this graph.
-
- If `tensor` is from a different graph, returns a placeholder for it.
- `tensor` and the placeholder will also appears in self.captures. Multiple
- calls to this method with the same `tensor` argument will return the same
- placeholder. If `tensor` is from this graph, returns `tensor`.
-
- Args:
- tensor: Tensor. May be from this FuncGraph or a different graph.
- name: Optional name if a placeholder is created.
-
- Returns:
- Tensor from this FuncGraph.
- """
- if isinstance(tensor, ops.EagerTensor):
- if name is None:
- name = str(ops.uid())
- return capture_value(self.captures, tensor, tensor.dtype, name)
- if tensor.graph is not self:
- if name is None:
- name = tensor.op.name
- return capture_value(self.captures, tensor, tensor.dtype, name)
- return tensor
-
- def create_op(
- self,
- op_type,
- inputs,
- dtypes, # pylint: disable=redefined-outer-name
- input_types=None,
- name=None,
- attrs=None,
- op_def=None,
- compute_shapes=True,
- compute_device=True):
- """Captures an external inputs before calling Graph.capture_op."""
- # This capturing logic interacts poorly with control flow contexts which
- # want to replace inputs of ops far too late in the process. This can lead
- # the context to get confused and try to create an Enter for an Enter. We
- # can detect this here and skip the additional Enter which can confuse loop
- # validation logic.
- if op_type == "Enter" and inputs[0].op.type == "Enter":
- if inputs[0].op.get_attr("frame_name") == attrs["frame_name"].s:
- return inputs[0].op
- # Calling AddValue on the control flow contexts to force creation of the
- # backward accumulators in the original graph before we create placeholders
- # to capture the inputs.
- ctxt = ops.get_default_graph()._control_flow_context # pylint: disable=protected-access
- for i, inp in enumerate(inputs):
- if ctxt is not None and hasattr(ctxt, "AddValue"):
- inp = ctxt.AddValue(inp)
- inp = self.capture(inp)
- inputs[i] = inp
- return super(CapturingGraph, self).create_op(
- op_type, inputs, dtypes, input_types, name, attrs, op_def,
- compute_device=compute_device)
-
-
def _get_device_functions(ctx, graph):
"""Returns a tuple of device functions representing the device stack."""
if ctx.executing_eagerly():
@@ -190,7 +99,7 @@ def _get_device_functions(ctx, graph):
return tuple(graph._device_functions_outer_to_inner) # pylint: disable=protected-access
-class FuncGraph(CapturingGraph):
+class FuncGraph(ops.Graph):
"""Graph representing a function body.
Attributes:
@@ -207,6 +116,8 @@ class FuncGraph(CapturingGraph):
variables: Variables that should be watched during function execution.
outer_graph: The graph this function is defined in. May be another FuncGraph
or the global default Graph.
+ captures: Maps external tensor -> internal tensor (i.e. input placeholder).
+ The entries are in the order they were captured.
seed: The graph-level random seed.
"""
@@ -227,6 +138,13 @@ class FuncGraph(CapturingGraph):
self.structured_outputs = None
self.variables = []
self.outer_graph = ops.get_default_graph()
+ self.captures = collections.OrderedDict()
+
+ self._building_function = True
+ # Map from resource tensor name to last op (in program order) which uses
+ # this tensor. Used to enforce that execution order matches program order
+ # for resource tensors.
+ self._last_op_using_resource_tensor = {}
graph = self.outer_graph
@@ -255,15 +173,107 @@ class FuncGraph(CapturingGraph):
self._graph_key = graph._graph_key
# pylint: enable=protected-access
+ def create_op(
+ self,
+ op_type,
+ inputs,
+ dtypes,
+ input_types=None,
+ name=None,
+ attrs=None,
+ op_def=None,
+ compute_shapes=True,
+ compute_device=True):
+ """Like Graph.create_op, except handles external input tensors.
+
+ This overload adds functionality to create_op to "capture" any external
+ input tensors, i.e. tensors from the eager context or outer function graphs
+ if this is a nested function. See `capture` for more information.
+
+ Args:
+ op_type: The `Operation` type to create. This corresponds to the
+ `OpDef.name` field for the proto that defines the operation.
+ inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
+ dtypes: A list of `DType` objects that will be the types of the tensors
+ that the operation produces.
+ input_types: (Optional.) A list of `DType`s that will be the types of
+ the tensors that the operation consumes. By default, uses the base
+ `DType` of each input in `inputs`. Operations that expect
+ reference-typed inputs must specify `input_types` explicitly.
+ name: (Optional.) A string name for the operation. If not specified, a
+ name is generated based on `op_type`.
+ attrs: (Optional.) A dictionary where the key is the attribute name (a
+ string) and the value is the respective `attr` attribute of the
+ `NodeDef` proto that will represent the operation (an `AttrValue`
+ proto).
+ op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
+ the operation will have.
+ compute_shapes: (Optional.) Deprecated. Has no effect (shapes are always
+ computed).
+ compute_device: (Optional.) If True, device functions will be executed
+ to compute the device property of the Operation.
+
+ Returns:
+ An `Operation` object.
+ """
+ # This capturing logic interacts poorly with control flow contexts which
+ # want to replace inputs of ops far too late in the process. This can lead
+ # the context to get confused and try to create an Enter for an Enter. We
+ # can detect this here and skip the additional Enter which can confuse loop
+ # validation logic.
+ if op_type == "Enter" and inputs[0].op.type == "Enter":
+ if inputs[0].op.get_attr("frame_name") == attrs["frame_name"].s:
+ return inputs[0].op
+ # Calling AddValue on the control flow contexts to force creation of the
+ # backward accumulators in the original graph before we create placeholders
+ # to capture the inputs.
+ ctxt = ops.get_default_graph()._control_flow_context # pylint: disable=protected-access
+ for i, inp in enumerate(inputs):
+ # TPU Estimator defines a control flow context with no AddValue method.
+ if ctxt is not None and hasattr(ctxt, "AddValue"):
+ inp = ctxt.AddValue(inp)
+ inp = self.capture(inp)
+ inputs[i] = inp
+ return super(FuncGraph, self).create_op(
+ op_type, inputs, dtypes, input_types, name, attrs, op_def,
+ compute_device=compute_device)
+
def capture(self, tensor, name=None):
- """Calls CapturingGraph.capture and updates self.inputs if necessary."""
- new_capture = tensor not in self.captures
- internal_tensor = super(FuncGraph, self).capture(tensor, name)
+ """Captures `tensor` if it's external to this graph.
- if new_capture and tensor is not internal_tensor:
- self.inputs.append(internal_tensor)
+ If `tensor` is from a different graph, returns a placeholder for it.
+ `tensor` and the placeholder will appear in self.captures, and the
+ placeholder will appear in self.inputs. Multiple calls to this method with
+ the same `tensor` argument will return the same placeholder. If `tensor` is
+ from this graph, returns `tensor`.
+
+ Args:
+ tensor: Tensor. May be from this FuncGraph or a different graph.
+ name: Optional name if a placeholder is created.
+
+ Returns:
+ Tensor from this FuncGraph.
+ """
+ if isinstance(tensor, ops.EagerTensor):
+ if name is None:
+ name = str(ops.uid())
+ return self._capture_helper(tensor, name)
+ if tensor.graph is not self:
+ if name is None:
+ name = tensor.op.name
+ return self._capture_helper(tensor, name)
+ return tensor
- return internal_tensor
+ def _capture_helper(self, tensor, name):
+ captured_tensor = self.captures.get(tensor, None)
+ if captured_tensor is None:
+ captured_tensor = _create_substitute_placeholder(tensor, name=name,
+ dtype=tensor.dtype)
+ self.captures[tensor] = captured_tensor
+ self.inputs.append(captured_tensor)
+ tape.record_operation("captured_value", [captured_tensor], [tensor],
+ lambda x: [x])
+ return captured_tensor
@property
def external_captures(self):
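The capture / _capture_helper pair folded into FuncGraph above reduces to a small piece of bookkeeping: an ordered map from external tensor to its substitute placeholder, with every newly created placeholder also appended to the graph's inputs so it becomes an extra function argument. A TensorFlow-free sketch of that bookkeeping (class and helper names are illustrative):

    import collections

    class CapturingScope(object):
        # Minimal sketch of the capture map kept by FuncGraph.
        def __init__(self):
            self.captures = collections.OrderedDict()   # external -> internal
            self.inputs = []

        def capture(self, external, make_placeholder):
            internal = self.captures.get(external)
            if internal is None:
                internal = make_placeholder(external)    # stand-in for the placeholder op
                self.captures[external] = internal
                self.inputs.append(internal)             # new captures become extra inputs
            return internal

    scope = CapturingScope()
    first = scope.capture("outer_t", lambda t: "placeholder_for_" + t)
    assert scope.capture("outer_t", None) is first       # repeated captures are reused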
diff --git a/tensorflow/python/eager/tensor_test.py b/tensorflow/python/eager/tensor_test.py
index 871136e2c8..32742a9b96 100644
--- a/tensorflow/python/eager/tensor_test.py
+++ b/tensorflow/python/eager/tensor_test.py
@@ -295,6 +295,7 @@ class TFETensorUtilTest(test_util.TensorFlowTestCase):
def testFloatTensor(self):
self.assertEqual(dtypes.float64, _create_tensor(np.float64()).dtype)
self.assertEqual(dtypes.float32, _create_tensor(np.float32()).dtype)
+ self.assertEqual(dtypes.float16, _create_tensor(np.float16()).dtype)
self.assertEqual(dtypes.float32, _create_tensor(0.0).dtype)
def testSliceDimOutOfRange(self):
diff --git a/tensorflow/python/estimator/BUILD b/tensorflow/python/estimator/BUILD
index 9fce172bee..cf8e18b216 100644
--- a/tensorflow/python/estimator/BUILD
+++ b/tensorflow/python/estimator/BUILD
@@ -684,8 +684,11 @@ py_test(
shard_count = 4,
srcs_version = "PY2AND3",
tags = [
+ "manual", # b/112769036, b/113907597
+ "no_oss", # b/112769036, b/113907597
"no_windows",
- "notsan",
+ "nomsan",
+ "notsan", # b/67510291
],
deps = [
":keras",
diff --git a/tensorflow/python/estimator/estimator.py b/tensorflow/python/estimator/estimator.py
index e44a69b374..0f20acefdf 100644
--- a/tensorflow/python/estimator/estimator.py
+++ b/tensorflow/python/estimator/estimator.py
@@ -2056,7 +2056,7 @@ class WarmStartSettings(
var_name_to_vocab_info: [Optional] Dict of variable names (strings) to
`tf.estimator.VocabInfo`. The variable names should be "full" variables,
not the names of the partitions. If not explicitly provided, the variable
- is assumed to have no vocabulary.
+ is assumed to have no (changes to) vocabulary.
var_name_to_prev_var_name: [Optional] Dict of variable names (strings) to
name of the previously-trained variable in `ckpt_to_initialize_from`. If
not explicitly provided, the name of the variable is assumed to be same
diff --git a/tensorflow/python/estimator/run_config.py b/tensorflow/python/estimator/run_config.py
index b1ca207b62..3773810a04 100644
--- a/tensorflow/python/estimator/run_config.py
+++ b/tensorflow/python/estimator/run_config.py
@@ -521,7 +521,12 @@ class RunConfig(object):
eval_distribute=eval_distribute,
experimental_distribute=experimental_distribute)
- if train_distribute or eval_distribute or experimental_distribute:
+ # TODO(frankchn,priyag): Eventually use distributed coordinator for TPUs.
+ if ((train_distribute and
+ train_distribute.__class__.__name__ != 'TPUStrategy') or
+ (eval_distribute and
+ eval_distribute.__class__.__name__ != 'TPUStrategy') or
+ experimental_distribute):
logging.info('Initializing RunConfig with distribution strategies.')
distribute_coordinator_training.init_run_config(self, tf_config)
else:
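The new condition above deliberately matches TPUStrategy by class name so run_config.py does not have to import it. A standalone sketch of that check (the helper and the stub class are made up for illustration):

    def _wants_distribute_coordinator(train_distribute, eval_distribute,
                                      experimental_distribute):
        # Any strategy other than TPUStrategy, or an explicit experimental
        # config, opts into distribute-coordinator initialization.
        def non_tpu(strategy):
            return (strategy is not None and
                    strategy.__class__.__name__ != 'TPUStrategy')
        return (non_tpu(train_distribute) or non_tpu(eval_distribute) or
                bool(experimental_distribute))

    class TPUStrategy(object):
        pass

    assert not _wants_distribute_coordinator(TPUStrategy(), None, None)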
diff --git a/tensorflow/python/framework/ops.py b/tensorflow/python/framework/ops.py
index 4cfd639bf9..9401309c19 100644
--- a/tensorflow/python/framework/ops.py
+++ b/tensorflow/python/framework/ops.py
@@ -55,6 +55,7 @@ from tensorflow.python.platform import app
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import decorator_utils
+from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
from tensorflow.python.util import lock_util
from tensorflow.python.util import tf_contextlib
@@ -5807,11 +5808,8 @@ class GraphKeys(object):
_STREAMING_MODEL_PORTS = "streaming_model_ports"
@decorator_utils.classproperty
+ @deprecation.deprecated(None, "Use `tf.GraphKeys.GLOBAL_VARIABLES` instead.")
def VARIABLES(cls): # pylint: disable=no-self-argument
- logging.log_first_n(logging.WARN,
- "VARIABLES collection name is deprecated, please use "
- "GLOBAL_VARIABLES instead; VARIABLES will be removed "
- "after 2017-03-02.", 1)
return cls.GLOBAL_VARIABLES
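The hunk above replaces a hand-rolled log_first_n warning with the shared deprecation decorator. As a generic illustration of that pattern (this is not tensorflow.python.util.deprecation, just a framework-free sketch):

    import functools
    import warnings

    def deprecated(date, instructions):
        # Warn once per decorated callable, then delegate to it.
        def decorator(func):
            warned = []
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                if not warned:
                    suffix = ' after %s' % date if date else ''
                    warnings.warn('%s is deprecated%s. %s'
                                  % (func.__name__, suffix, instructions),
                                  DeprecationWarning, stacklevel=2)
                    warned.append(True)
                return func(*args, **kwargs)
            return wrapper
        return decorator

    @deprecated(None, 'Use `tf.GraphKeys.GLOBAL_VARIABLES` instead.')
    def variables_collection():
        return 'variables'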
diff --git a/tensorflow/python/framework/test_util.py b/tensorflow/python/framework/test_util.py
index 3b63e49a84..0925598e33 100644
--- a/tensorflow/python/framework/test_util.py
+++ b/tensorflow/python/framework/test_util.py
@@ -1073,13 +1073,9 @@ class TensorFlowTestCase(googletest.TestCase):
if context.executing_eagerly():
yield None
else:
- sess = self._create_session(graph, config, use_gpu, force_gpu)
- with self._constrain_devices_and_set_default(
- sess, use_gpu, force_gpu) as constrained_sess:
- # We need to do this to make sure the session closes, otherwise, even
- # if the user does with self.session():, it will not close the session.
- with constrained_sess:
- yield constrained_sess
+ with self._create_session(graph, config, force_gpu) as sess:
+ with self._constrain_devices_and_set_default(sess, use_gpu, force_gpu):
+ yield sess
@contextlib.contextmanager
def cached_session(self,
@@ -1127,10 +1123,11 @@ class TensorFlowTestCase(googletest.TestCase):
if context.executing_eagerly():
yield None
else:
- with self._get_cached_session(
- graph, config, use_gpu, force_gpu,
- crash_if_inconsistent_args=True) as sess:
- yield sess
+ sess = self._get_cached_session(
+ graph, config, force_gpu, crash_if_inconsistent_args=True)
+ with self._constrain_devices_and_set_default(sess, use_gpu,
+ force_gpu) as cached:
+ yield cached
@contextlib.contextmanager
def test_session(self,
@@ -1146,10 +1143,11 @@ class TensorFlowTestCase(googletest.TestCase):
yield None
else:
if graph is None:
- with self._get_cached_session(
- graph, config, use_gpu, force_gpu,
- crash_if_inconsistent_args=False) as sess:
- yield sess
+ sess = self._get_cached_session(
+ graph, config, force_gpu, crash_if_inconsistent_args=False)
+ with self._constrain_devices_and_set_default(sess, use_gpu,
+ force_gpu) as cached:
+ yield cached
else:
with self.session(graph, config, use_gpu, force_gpu) as sess:
yield sess
@@ -1835,91 +1833,69 @@ class TensorFlowTestCase(googletest.TestCase):
with sess.graph.device("/cpu:0"):
yield sess
- def _create_session(self, graph, config, use_gpu, force_gpu):
+ def _create_session(self, graph, config, force_gpu):
"""See session() for details."""
- if context.executing_eagerly():
- return None
- else:
+ def prepare_config(config):
+ """Returns a config for sessions.
- def prepare_config(config):
- """Returns a config for sessions.
-
- Args:
- config: An optional config_pb2.ConfigProto to use to configure the
- session.
- Returns:
- A config_pb2.ConfigProto object.
- """
- if config is None:
- config = config_pb2.ConfigProto()
- config.allow_soft_placement = not force_gpu
- config.gpu_options.per_process_gpu_memory_fraction = 0.3
- elif force_gpu and config.allow_soft_placement:
- config = config_pb2.ConfigProto().CopyFrom(config)
- config.allow_soft_placement = False
- # Don't perform optimizations for tests so we don't inadvertently run
- # gpu ops on cpu
- config.graph_options.optimizer_options.opt_level = -1
- config.graph_options.rewrite_options.constant_folding = (
- rewriter_config_pb2.RewriterConfig.OFF)
- config.graph_options.rewrite_options.arithmetic_optimization = (
- rewriter_config_pb2.RewriterConfig.OFF)
- return config
-
- return ErrorLoggingSession(graph=graph, config=prepare_config(config))
+ Args:
+ config: An optional config_pb2.ConfigProto to use to configure the
+ session.
+
+ Returns:
+ A config_pb2.ConfigProto object.
+ """
+ if config is None:
+ config = config_pb2.ConfigProto()
+ config.allow_soft_placement = not force_gpu
+ config.gpu_options.per_process_gpu_memory_fraction = 0.3
+ elif force_gpu and config.allow_soft_placement:
+ config = config_pb2.ConfigProto().CopyFrom(config)
+ config.allow_soft_placement = False
+ # Don't perform optimizations for tests so we don't inadvertently run
+ # gpu ops on cpu
+ config.graph_options.optimizer_options.opt_level = -1
+ config.graph_options.rewrite_options.constant_folding = (
+ rewriter_config_pb2.RewriterConfig.OFF)
+ config.graph_options.rewrite_options.arithmetic_optimization = (
+ rewriter_config_pb2.RewriterConfig.OFF)
+ return config
+
+ return ErrorLoggingSession(graph=graph, config=prepare_config(config))
- @contextlib.contextmanager
def _get_cached_session(self,
graph=None,
config=None,
- use_gpu=False,
force_gpu=False,
crash_if_inconsistent_args=True):
"""See cached_session() for documentation."""
- if context.executing_eagerly():
- yield None
+ if self._cached_session is None:
+ sess = self._create_session(
+ graph=graph, config=config, force_gpu=force_gpu)
+ self._cached_session = sess
+ self._cached_graph = graph
+ self._cached_config = config
+ self._cached_force_gpu = force_gpu
+ return sess
else:
- if self._cached_session is None:
- sess = self._create_session(
- graph=graph, config=config, use_gpu=use_gpu, force_gpu=force_gpu)
- self._cached_session = sess
- self._cached_graph = graph
- self._cached_config = config
- self._cached_use_gpu = use_gpu
- self._cached_force_gpu = force_gpu
- with self._constrain_devices_and_set_default(
- sess, use_gpu, force_gpu) as constrained_sess:
- yield constrained_sess
- else:
- if crash_if_inconsistent_args and self._cached_graph is not graph:
- raise ValueError("The graph used to get the cached session is "
- "different than the one that was used to create the "
- "session. Maybe create a new session with "
- "self.session()")
- if crash_if_inconsistent_args and self._cached_config is not config:
- raise ValueError("The config used to get the cached session is "
- "different than the one that was used to create the "
- "session. Maybe create a new session with "
- "self.session()")
- if crash_if_inconsistent_args and self._cached_use_gpu is not use_gpu:
- raise ValueError(
- "The use_gpu value used to get the cached session is "
- "different than the one that was used to create the "
- "session. Maybe create a new session with "
- "self.session()")
- if crash_if_inconsistent_args and (self._cached_force_gpu is
- not force_gpu):
- raise ValueError(
- "The force_gpu value used to get the cached session is "
- "different than the one that was used to create the "
- "session. Maybe create a new session with "
- "self.session()")
- # If you modify this logic, make sure to modify it in _create_session
- # as well.
- sess = self._cached_session
- with self._constrain_devices_and_set_default(
- sess, use_gpu, force_gpu) as constrained_sess:
- yield constrained_sess
+ if crash_if_inconsistent_args and self._cached_graph is not graph:
+ raise ValueError("The graph used to get the cached session is "
+ "different than the one that was used to create the "
+ "session. Maybe create a new session with "
+ "self.session()")
+ if crash_if_inconsistent_args and self._cached_config is not config:
+ raise ValueError("The config used to get the cached session is "
+ "different than the one that was used to create the "
+ "session. Maybe create a new session with "
+ "self.session()")
+ if crash_if_inconsistent_args and (self._cached_force_gpu is
+ not force_gpu):
+ raise ValueError(
+ "The force_gpu value used to get the cached session is "
+ "different than the one that was used to create the "
+ "session. Maybe create a new session with "
+ "self.session()")
+ return self._cached_session
@tf_export("test.create_local_cluster")
diff --git a/tensorflow/python/framework/test_util_test.py b/tensorflow/python/framework/test_util_test.py
index a0939f98b2..c4f8fa9108 100644
--- a/tensorflow/python/framework/test_util_test.py
+++ b/tensorflow/python/framework/test_util_test.py
@@ -71,9 +71,6 @@ class TestUtilTest(test_util.TensorFlowTestCase):
with self.cached_session(graph=ops.Graph()) as sess2:
pass
with self.assertRaises(ValueError):
- with self.cached_session(use_gpu=True) as sess2:
- pass
- with self.assertRaises(ValueError):
with self.cached_session(force_gpu=True) as sess2:
pass
# We make sure that test_session will cache the session even after the
diff --git a/tensorflow/python/keras/engine/base_layer.py b/tensorflow/python/keras/engine/base_layer.py
index b6b05c0311..cb19a412a2 100644
--- a/tensorflow/python/keras/engine/base_layer.py
+++ b/tensorflow/python/keras/engine/base_layer.py
@@ -1001,7 +1001,7 @@ class Layer(checkpointable.CheckpointableBase):
self.build(input_shape)
with context.graph_mode():
- graph = eager_function.CapturingGraph()
+ graph = eager_function.FuncGraph('graph')
with graph.as_default():
if isinstance(input_shape, list):
inputs = [generate_placeholders_from_shape(shape)
diff --git a/tensorflow/python/keras/engine/network.py b/tensorflow/python/keras/engine/network.py
index f8c23ed124..10dd70cf23 100644
--- a/tensorflow/python/keras/engine/network.py
+++ b/tensorflow/python/keras/engine/network.py
@@ -770,7 +770,7 @@ class Network(base_layer.Layer):
# and graph building, the variables created after building the model in
# a Graph are still valid when executing eagerly.
with context.graph_mode():
- graph = eager_function.CapturingGraph()
+ graph = eager_function.FuncGraph('graph')
with graph.as_default():
if isinstance(input_shape, list):
x = [base_layer.generate_placeholders_from_shape(shape)
diff --git a/tensorflow/python/keras/engine/training_distributed.py b/tensorflow/python/keras/engine/training_distributed.py
index a7bb1f8177..e440e02bfb 100644
--- a/tensorflow/python/keras/engine/training_distributed.py
+++ b/tensorflow/python/keras/engine/training_distributed.py
@@ -19,13 +19,16 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
+from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import callbacks as cbks
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.engine import distributed_training_utils
from tensorflow.python.keras.utils.generic_utils import Progbar
+from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
+from tensorflow.python.training import distribute as distribute_lib
def fit_loop(
@@ -64,6 +67,11 @@ def fit_loop(
"""
current_strategy = model._distribution_strategy
+ # TODO(priyag, sourabhbajaj): Remove this when the codepaths are merged.
+ if current_strategy.__class__.__name__ == 'TPUStrategy':
+ return _experimental_fit_loop(
+ model, iterator, epochs, initial_epoch, steps_per_epoch)
+
clone_model_on_towers(
model, current_strategy, make_callback_model=True)
@@ -116,11 +124,6 @@ def fit_loop(
do_validation = False
if validation_steps:
do_validation = True
- if steps_per_epoch is None:
- raise ValueError('Can only use `validation_steps` '
- 'when doing step-wise '
- 'training, i.e. `steps_per_epoch` '
- 'must be set.')
# Copy the weights from the original model to each of the replicated models.
orig_model_weights = model.get_weights()
@@ -140,44 +143,46 @@ def fit_loop(
verbose=verbose)
out_labels = model.metrics_names or []
callbacks.on_train_begin()
+
+ assert steps_per_epoch is not None
+
for epoch in range(initial_epoch, epochs):
callbacks.on_epoch_begin(epoch)
- if steps_per_epoch is not None:
- epoch_logs = {}
- for step_index in range(steps_per_epoch):
- batch_logs = {'batch': step_index, 'size': 1}
- callbacks.on_batch_begin(step_index, batch_logs)
- try:
- outs = distributed_train_function(ins)
- except errors.OutOfRangeError:
- logging.warning('Your dataset iterator ran out of data; '
- 'interrupting training. Make sure that your dataset '
- 'can generate at least `steps_per_epoch * epochs` '
- 'batches (in this case, %d batches).' %
- steps_per_epoch * epochs)
- break
-
- if not isinstance(outs, list):
- outs = [outs]
-
- outs = _aggregate_metrics_across_towers(
- current_strategy.num_towers, out_labels, outs)
- for l, o in zip(out_labels, outs):
- batch_logs[l] = o
- callbacks.on_batch_end(step_index, batch_logs)
- if callbacks.model.stop_training:
- break
- if do_validation:
- val_outs = test_loop(
- model,
- val_iterator,
- steps=validation_steps,
- verbose=0)
- if not isinstance(val_outs, list):
- val_outs = [val_outs]
- # Same labels assumed.
- for l, o in zip(out_labels, val_outs):
- epoch_logs['val_' + l] = o
+ epoch_logs = {}
+ for step_index in range(steps_per_epoch):
+ batch_logs = {'batch': step_index, 'size': 1}
+ callbacks.on_batch_begin(step_index, batch_logs)
+ try:
+ outs = distributed_train_function(ins)
+ except errors.OutOfRangeError:
+ logging.warning('Your dataset iterator ran out of data; '
+ 'interrupting training. Make sure that your dataset '
+ 'can generate at least `steps_per_epoch * epochs` '
+ 'batches (in this case, %d batches).' %
+                        (steps_per_epoch * epochs))
+ break
+
+ if not isinstance(outs, list):
+ outs = [outs]
+
+ outs = _aggregate_metrics_across_towers(
+ current_strategy.num_towers, out_labels, outs)
+ for l, o in zip(out_labels, outs):
+ batch_logs[l] = o
+ callbacks.on_batch_end(step_index, batch_logs)
+ if callbacks.model.stop_training:
+ break
+ if do_validation:
+ val_outs = test_loop(
+ model,
+ val_iterator,
+ steps=validation_steps,
+ verbose=0)
+ if not isinstance(val_outs, list):
+ val_outs = [val_outs]
+ # Same labels assumed.
+ for l, o in zip(out_labels, val_outs):
+ epoch_logs['val_' + l] = o
callbacks.on_epoch_end(epoch, epoch_logs)
if callbacks.model.stop_training:
@@ -192,6 +197,139 @@ def fit_loop(
return model.history
+def _experimental_fit_loop(
+ model,
+ iterator,
+ epochs=100,
+ initial_epoch=0,
+ steps_per_epoch=None):
+ """fit function when using TPU DistributionStrategy for training.
+
+ Arguments:
+ model: Keras Model instance.
+    iterator: Iterator that returns inputs and targets.
+    epochs: Number of times to iterate over the data.
+    initial_epoch: Epoch at which to start training
+      (useful for resuming a previous training run).
+    steps_per_epoch: Total number of steps (batches of samples)
+      before declaring one epoch finished and starting the
+      next epoch. Must not be `None` for this loop.
+
+ Returns:
+    `None`.
+
+ Raises:
+ ValueError: in case of invalid arguments.
+ """
+ current_strategy = model._distribution_strategy
+
+ # TODO(priyag): Add validation that shapes are fully defined for TPU case.
+
+ # TODO(priyag, sourabhbajaj): This should be moved into a callback instead.
+ K.get_session().run(current_strategy.initialize())
+
+ def _per_device_train_function(model):
+ model._make_train_function()
+ return (model.train_function.inputs,
+ model.train_function.outputs,
+ model.train_function.updates_op,
+ model.train_function.session_kwargs)
+
+ # TODO(priyag, sourabhbajaj): This should likely not be hardcoded here.
+ K.set_learning_phase(1)
+
+ def step_fn(ctx, inputs, targets):
+ """Clones the model and calls make_train_function."""
+ # TODO(priyag, sourabhbajaj): Should cache this keyed on input shapes.
+ clone_model_on_towers(
+ model,
+ current_strategy,
+ make_callback_model=True,
+ inputs=inputs,
+ targets=targets)
+
+ (grouped_inputs, grouped_outputs, grouped_updates,
+ grouped_session_args) = current_strategy.call_for_each_tower(
+ _per_device_train_function, model._grouped_model)
+ (all_inputs, all_outputs, all_updates,
+ all_session_args) = distributed_training_utils.unwrap_values(
+ current_strategy, grouped_inputs, grouped_outputs,
+ grouped_updates, grouped_session_args, with_loss_tensor=True)
+ combined_fn = K.Function(
+ all_inputs, all_outputs,
+ updates=all_updates,
+ name='distributed_train_function',
+ **all_session_args)
+
+ # TODO(priyag, sourabhbajaj): Perhaps the aggregation type needs to be
+ # something else for different outputs.
+ out_labels = model.metrics_names or []
+ for label, output in zip(out_labels, combined_fn.outputs):
+ ctx.set_last_step_output(label, output,
+ aggregation=distribute_lib.get_loss_reduction())
+
+    # TODO(priyag, sourabhbajaj): For now we ignore the combined_fn's
+    # feed_dict, session kwargs, run options and run_metadata. These should
+    # be handled appropriately.
+ return combined_fn.updates_op
+
+ # Add initial dummy values for loss and other metric tensors.
+ initial_loop_values = {}
+ initial_loop_values['loss'] = constant_op.constant(1e7)
+ for name, tensor in zip(model.metrics_names[1:], model.metrics_tensors):
+ initial_loop_values[name] = array_ops.zeros(tensor.shape, tensor.dtype)
+
+ with current_strategy.scope():
+ # TODO(priyag, sourabhbajaj): Adjust steps_per_run appropriately based on
+ # steps_per_epoch and number of epochs.
+ ctx = current_strategy.run_steps_on_dataset(
+ step_fn, iterator, iterations=current_strategy.steps_per_run,
+ initial_loop_values=initial_loop_values)
+
+ train_op = ctx.run_op
+ output_tensors = ctx.last_step_outputs
+
+ # Copy the weights from the original model to each of the replicated models.
+ orig_model_weights = model.get_weights()
+ with current_strategy.scope():
+ distributed_model = current_strategy.unwrap(model._grouped_model)[0]
+ distributed_training_utils.set_weights(
+ current_strategy, distributed_model, orig_model_weights)
+
+ assert steps_per_epoch is not None
+
+ # TODO(priyag, sourabhbajaj): Add callbacks support.
+ # TODO(priyag, sourabhbajaj): Add validation.
+ for epoch in range(initial_epoch, epochs):
+ for step_index in range(
+ 0, steps_per_epoch, current_strategy.steps_per_run):
+ try:
+ _, outs = K.get_session().run([train_op, output_tensors])
+ # TODO(priyag, sourabhbajaj): Remove this logging in favor of proper
+ # summaries through callbacks.
+ print('Epoch: {}, step_index: {}, loss: {}'.format(
+ epoch, step_index, outs['loss']))
+ for label, out in outs.items():
+ print(label, ': ', out)
+ except errors.OutOfRangeError:
+ logging.warning('Your dataset iterator ran out of data; '
+ 'interrupting training. Make sure that your dataset '
+ 'can generate at least `steps_per_epoch * epochs` '
+ 'batches (in this case, %d batches).' %
+                        (steps_per_epoch * epochs))
+ break
+
+ # Copy the weights back from the replicated model to the original model.
+ with current_strategy.scope():
+ updated_weights = current_strategy.unwrap(
+ model._grouped_model)[0].get_weights()
+ model.set_weights(updated_weights)
+
+ K.get_session().run(current_strategy.finalize())
+
+ # TODO(priyag, sourabhbajaj): Return history.
+
+
def test_loop(model, iterator, verbose=0, steps=None):
"""evaluate method to validate a model that uses DistributionStrategy.
@@ -373,12 +511,12 @@ def predict_loop(model, iterator, verbose=0, steps=None):
]
-def _clone_and_build_model(model):
+def _clone_and_build_model(model, inputs=None, targets=None):
"""Clone and build the given keras_model."""
# We need to set the import here since we run into a circular dependency
# error.
from tensorflow.python.keras import models # pylint: disable=g-import-not-at-top
- cloned_model = models.clone_model(model, input_tensors=None)
+ cloned_model = models.clone_model(model, input_tensors=inputs)
# Compile and build model.
if isinstance(model.optimizer, optimizers.TFOptimizer):
@@ -387,22 +525,29 @@ def _clone_and_build_model(model):
optimizer_config = model.optimizer.get_config()
optimizer = model.optimizer.__class__.from_config(optimizer_config)
+ # TODO(priyag): Is there a cleaner way to do this? The API doc suggests a
+ # single tensor should be OK but it throws an error in that case.
+ if (targets is not None and not isinstance(targets, list) and
+ not isinstance(targets, dict)):
+ targets = [targets]
cloned_model.compile(
optimizer,
model.loss,
metrics=model.metrics,
loss_weights=model.loss_weights,
sample_weight_mode=model.sample_weight_mode,
- weighted_metrics=model.weighted_metrics)
+ weighted_metrics=model.weighted_metrics,
+ target_tensors=targets)
return cloned_model
-def clone_model_on_towers(model, strategy, make_callback_model=False):
+def clone_model_on_towers(
+ model, strategy, make_callback_model=False, inputs=None, targets=None):
"""Create a cloned model on each tower, unless already created."""
if not model._grouped_model:
with strategy.scope():
model._grouped_model = strategy.call_for_each_tower(
- _clone_and_build_model, model)
+ _clone_and_build_model, model, inputs, targets)
if make_callback_model:
model._make_callback_model()
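
Note on the TPU loop's step accounting above: each run of train_op executes current_strategy.steps_per_run training steps, so the inner loop advances in strides of steps_per_run. A small sketch with hypothetical numbers:

    # With steps_per_epoch=100 and steps_per_run=10, the inner loop in
    # _experimental_fit_loop makes 10 session calls per epoch, each driving
    # 10 TPU training steps.
    steps_per_epoch = 100
    steps_per_run = 10
    session_calls = list(range(0, steps_per_epoch, steps_per_run))
    assert len(session_calls) == 10
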
diff --git a/tensorflow/python/kernel_tests/py_func_test.py b/tensorflow/python/kernel_tests/py_func_test.py
index 79fcbaad43..5f5e24bd63 100644
--- a/tensorflow/python/kernel_tests/py_func_test.py
+++ b/tensorflow/python/kernel_tests/py_func_test.py
@@ -566,6 +566,18 @@ class PyFuncTest(test.TestCase):
dy_dx = gradients_impl.gradients(y, x)[0]
self.assertEqual(self.evaluate(dy_dx), 6.0)
+ def testEagerGradientGraphTwoOutputs(self):
+
+ def f(x, y):
+ return x * y, x / y
+
+ x = constant_op.constant(3.0)
+ y = constant_op.constant(2.0)
+ fa, fb = script_ops.eager_py_func(f, inp=[x, y],
+ Tout=[dtypes.float32, dtypes.float32])
+ dy_dx = gradients_impl.gradients(fa + fb, x)[0]
+ self.assertEqual(self.evaluate(dy_dx), 2.5)
+
@test_util.run_in_graph_and_eager_modes
def testEagerGradientTapeMultipleArgs(self):
diff --git a/tensorflow/python/lib/core/py_seq_tensor.cc b/tensorflow/python/lib/core/py_seq_tensor.cc
index 3b4f12ae31..269142a7c2 100644
--- a/tensorflow/python/lib/core/py_seq_tensor.cc
+++ b/tensorflow/python/lib/core/py_seq_tensor.cc
@@ -55,6 +55,10 @@ bool IsPyDouble(PyObject* obj) {
return PyIsInstance(obj, &PyDoubleArrType_Type); // NumPy double type.
}
+bool IsNumpyHalf(PyObject* obj) {
+ return PyIsInstance(obj, &PyHalfArrType_Type);
+}
+
bool IsPyFloat(PyObject* obj) {
return PyFloat_Check(obj) ||
PyIsInstance(obj, &PyFloatingArrType_Type); // NumPy float types
@@ -156,6 +160,8 @@ Status InferShapeAndType(PyObject* obj, TensorShape* shape, DataType* dtype) {
}
} else if (IsPyDouble(obj)) {
*dtype = DT_DOUBLE;
+ } else if (IsNumpyHalf(obj)) {
+ *dtype = DT_HALF;
} else if (IsPyFloat(obj)) {
*dtype = DT_FLOAT;
} else if (PyBool_Check(obj) || PyIsInstance(obj, &PyBoolArrType_Type)) {
@@ -357,6 +363,17 @@ const char* ConvertOneFloat(PyObject* v, T* out) {
DEFINE_HELPER(ConvertDouble, double, DT_DOUBLE, ConvertOneFloat<double>);
DEFINE_HELPER(ConvertFloat, float, DT_FLOAT, ConvertOneFloat<float>);
+const char* ConvertOneNumpyHalf(PyObject* v, Eigen::half* out) {
+ // NOTE(nareshmodi): Is there a way to convert to C double without the
+ // intermediate Python double? This will help with ConvertOneFloat as well.
+ Safe_PyObjectPtr as_float = make_safe(PyNumber_Float(v));
+ double v_double = PyFloat_AS_DOUBLE(as_float.get());
+ *out = Eigen::half(v_double);
+
+ return nullptr;
+}
+DEFINE_HELPER(ConvertNumpyHalf, Eigen::half, DT_HALF, ConvertOneNumpyHalf);
+
// String support
const char* ConvertOneString(PyObject* v, string* out) {
@@ -452,6 +469,9 @@ Status PySeqToTensor(PyObject* obj, PyObject* dtype, Tensor* ret) {
if (ConvertDouble(obj, shape, ret) == nullptr) return Status::OK();
break;
+ case DT_HALF:
+ RETURN_STRING_AS_STATUS(ConvertNumpyHalf(obj, shape, ret));
+
case DT_INT64:
if (ConvertInt64(obj, shape, ret) == nullptr) return Status::OK();
break;
@@ -489,8 +509,13 @@ Status PySeqToTensor(PyObject* obj, PyObject* dtype, Tensor* ret) {
// final type.
RETURN_STRING_AS_STATUS(ConvertDouble(obj, shape, ret));
}
+
case DT_DOUBLE:
RETURN_STRING_AS_STATUS(ConvertDouble(obj, shape, ret));
+
+ case DT_HALF:
+ RETURN_STRING_AS_STATUS(ConvertNumpyHalf(obj, shape, ret));
+
case DT_INT64:
if (requested_dtype == DT_INVALID) {
const char* error = ConvertInt32(obj, shape, ret);
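
With the DT_HALF inference and ConvertNumpyHalf helper above, Python sequences of NumPy half scalars convert directly to float16 tensors. An illustrative snippet (assumes a build containing this change):

    import numpy as np
    import tensorflow as tf

    # The element type is inferred as tf.float16 from the NumPy half scalars.
    t = tf.constant([np.float16(1.5), np.float16(2.5)])
    print(t.dtype)  # tf.float16
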
diff --git a/tensorflow/python/ops/gradients.py b/tensorflow/python/ops/gradients.py
index 9fa8e27d5c..1dc666e78b 100644
--- a/tensorflow/python/ops/gradients.py
+++ b/tensorflow/python/ops/gradients.py
@@ -19,10 +19,10 @@ from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
+from tensorflow.python.eager import function
from tensorflow.python.eager.backprop import GradientTape
from tensorflow.python.ops.custom_gradient import custom_gradient
from tensorflow.python.ops.gradients_impl import AggregationMethod
from tensorflow.python.ops.gradients_impl import gradients
from tensorflow.python.ops.gradients_impl import hessians
# pylint: enable=unused-import
-
diff --git a/tensorflow/python/ops/gradients_impl.py b/tensorflow/python/ops/gradients_impl.py
index a68f680224..3268b38b86 100644
--- a/tensorflow/python/ops/gradients_impl.py
+++ b/tensorflow/python/ops/gradients_impl.py
@@ -31,7 +31,7 @@ from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
-from tensorflow.python.framework import function
+from tensorflow.python.framework import function as framework_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
@@ -58,6 +58,10 @@ from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export
+# This is to avoid a circular dependency (eager.function depends on
+# gradients_impl). This is set in eager/function.py.
+_function = None
+
# This is to avoid a circular dependency with cond_v2_impl.
cond_v2_impl._gradients_impl = sys.modules[__name__] # pylint: disable=protected-access
@@ -121,7 +125,7 @@ def _MarkReachedOps(from_ops, reached_ops, func_graphs):
Args:
from_ops: list of Operations.
reached_ops: set of Operations.
- func_graphs: list of function._FuncGraphs. This method will traverse through
+ func_graphs: list of _function.FuncGraphs. This method will traverse through
these functions if they capture from_ops or any reachable ops.
"""
queue = collections.deque()
@@ -146,7 +150,7 @@ def _PendingCount(to_ops, from_ops, colocate_gradients_with_ops, func_graphs,
to_ops: list of Operations.
from_ops: list of Operations.
colocate_gradients_with_ops: Python bool. See docstring of gradients().
- func_graphs: list of function._FuncGraphs. This method will traverse through
+ func_graphs: list of _function.FuncGraphs. This method will traverse through
these functions if they capture from_ops or any reachable ops. This is
useful if to_ops occur in a function and from_ops are in an outer function
or graph.
@@ -441,6 +445,19 @@ def _RaiseNoGradWrtInitialLoopValError(op, from_ops, xs):
% target_op.name)
+def _IsFunction(graph):
+ return (isinstance(graph, _function.FuncGraph) or
+ isinstance(graph, framework_function._FuncGraph)) # pylint: disable=protected-access
+
+
+def _Captures(func_graph):
+ if isinstance(func_graph, _function.FuncGraph):
+ return func_graph.captures
+ else:
+ assert isinstance(func_graph, framework_function._FuncGraph) # pylint: disable=protected-access
+ return func_graph._captured # pylint: disable=protected-access
+
+
def _MaybeCaptured(t):
"""If t is a captured value placeholder, returns the original captured value.
@@ -448,11 +465,11 @@ def _MaybeCaptured(t):
t: Tensor
Returns:
- A tensor, potentially from a different Graph/function._FuncGraph.
+ A tensor, potentially from a different Graph/_function.FuncGraph.
"""
# pylint: disable=protected-access
- if isinstance(t.op.graph, function._FuncGraph) and t.op.type == "Placeholder":
- for input_t, placeholder_t in t.op.graph._captured.items():
+ if _IsFunction(t.op.graph) and t.op.type == "Placeholder":
+ for input_t, placeholder_t in _Captures(t.op.graph).items():
if t == placeholder_t:
return _MaybeCaptured(input_t)
# pylint: enable=protected-access
@@ -470,10 +487,10 @@ def _Inputs(op, xs):
Returns:
A list of tensors. The tensors may be from multiple
- Graph/function._FuncGraphs if op is in a function._FuncGraph and has
+ Graph/_function.FuncGraphs if op is in a _function.FuncGraph and has
captured inputs.
"""
- if isinstance(op.graph, function._FuncGraph): # pylint: disable=protected-access
+ if _IsFunction(op.graph): # pylint: disable=protected-access
# If we're differentiating w.r.t. `t`, do not attempt to traverse through it
# to a captured value. The algorithm needs to "see" `t` in this case, even
# if it's a function input for a captured value, whereas usually we'd like
@@ -489,7 +506,7 @@ def _Consumers(t, func_graphs):
Args:
t: Tensor
- func_graphs: a list of function._FuncGraphs that may have captured t.
+ func_graphs: a list of _function.FuncGraphs that may have captured t.
Returns:
A list of tensors. The tensors will be from the current graph and/or
@@ -497,7 +514,7 @@ def _Consumers(t, func_graphs):
"""
consumers = t.consumers()
for func in func_graphs:
- for input_t, placeholder in func._captured.items(): # pylint: disable=protected-access
+ for input_t, placeholder in _Captures(func).items():
if input_t == t:
consumers.extend(_Consumers(placeholder, func_graphs))
return consumers
@@ -616,9 +633,13 @@ def _GradientsHelper(ys,
# ancestor graphs. This is necessary for correctly handling captured values.
func_graphs = []
curr_graph = src_graph
- while isinstance(curr_graph, function._FuncGraph): # pylint: disable=protected-access
+ while _IsFunction(curr_graph):
func_graphs.append(curr_graph)
- curr_graph = curr_graph._outer_graph # pylint: disable=protected-access
+ if isinstance(curr_graph, _function.FuncGraph):
+ curr_graph = curr_graph.outer_graph
+ else:
+ assert isinstance(curr_graph, framework_function._FuncGraph) # pylint: disable=protected-access
+ curr_graph = curr_graph._outer_graph # pylint: disable=protected-access
ys = _AsList(ys)
xs = _AsList(xs)
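
The _IsFunction/_Captures helpers above let the gradient code walk captures of both the legacy framework_function._FuncGraph and the new eager function.FuncGraph. A small sketch of the case this enables, mirroring the defun tests added below: differentiating, inside a defun, with respect to a tensor captured from the outer graph.

    import tensorflow as tf
    from tensorflow.python.eager import function

    x = tf.constant(1.0, name='x')

    @function.defun()
    def foo():
      # x is captured into the FuncGraph; the gradient code must traverse
      # the capture back to the outer graph to find the path from y to x.
      y = tf.multiply(x, 2.0, name='y')
      return tf.gradients(y, x)[0]

    grad = foo()  # Evaluates to 2.0 when run in a session.
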
diff --git a/tensorflow/python/ops/gradients_test.py b/tensorflow/python/ops/gradients_test.py
index fa9910b351..3759d8a543 100644
--- a/tensorflow/python/ops/gradients_test.py
+++ b/tensorflow/python/ops/gradients_test.py
@@ -26,9 +26,10 @@ import numpy as np
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
+from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
-from tensorflow.python.framework import function
+from tensorflow.python.framework import function as framework_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
@@ -369,8 +370,8 @@ class FunctionGradientsTest(test_util.TensorFlowTestCase):
@classmethod
def _GetFunc(cls, **kwargs):
- return function.Defun(dtypes.float32, dtypes.float32, **
- kwargs)(cls.XSquarePlusB)
+ return framework_function.Defun(dtypes.float32, dtypes.float32, **
+ kwargs)(cls.XSquarePlusB)
def _GetFuncGradients(self, f, x_value, b_value):
x = constant_op.constant(x_value, name="x")
@@ -408,8 +409,9 @@ class FunctionGradientsTest(test_util.TensorFlowTestCase):
def testFunctionGradientsWithGradFunc(self):
g = ops.Graph()
with g.as_default():
- grad_func = function.Defun(dtypes.float32, dtypes.float32,
- dtypes.float32)(self.XSquarePlusBGradient)
+ grad_func = framework_function.Defun(dtypes.float32, dtypes.float32,
+ dtypes.float32)(
+ self.XSquarePlusBGradient)
f = self._GetFunc(grad_func=grad_func)
# Get gradients (should add SymbolicGradient node for function, which
# uses the grad_func above, which multiplies all gradients by 2).
@@ -430,8 +432,9 @@ class FunctionGradientsTest(test_util.TensorFlowTestCase):
def testFunctionGradientWithGradFuncAndRegistration(self):
g = ops.Graph()
with g.as_default():
- grad_func = function.Defun(dtypes.float32, dtypes.float32,
- dtypes.float32)(self.XSquarePlusBGradient)
+ grad_func = framework_function.Defun(dtypes.float32, dtypes.float32,
+ dtypes.float32)(
+ self.XSquarePlusBGradient)
with self.assertRaisesRegexp(ValueError, "Gradient defined twice"):
f = self._GetFunc(
grad_func=grad_func, python_grad_func=self._PythonGradient)
@@ -441,7 +444,7 @@ class FunctionGradientsTest(test_util.TensorFlowTestCase):
with ops.Graph().as_default():
x = constant_op.constant(1.0, name="x")
- @function.Defun()
+ @function.defun()
def Foo():
y = math_ops.multiply(x, 2.0, name="y")
g = gradients_impl.gradients(y, x)
@@ -456,7 +459,7 @@ class FunctionGradientsTest(test_util.TensorFlowTestCase):
x = constant_op.constant(1.0, name="x")
y = math_ops.multiply(x, 2.0, name="y")
- @function.Defun()
+ @framework_function.Defun()
def Foo():
g = gradients_impl.gradients(y, x)
return g[0]
@@ -469,7 +472,7 @@ class FunctionGradientsTest(test_util.TensorFlowTestCase):
with ops.Graph().as_default():
var = resource_variable_ops.ResourceVariable(1.0, name="var")
- @function.Defun()
+ @function.defun()
def Foo():
y = math_ops.multiply(var, 2.0, name="y")
g = gradients_impl.gradients(y, var)
@@ -486,11 +489,11 @@ class FunctionGradientsTest(test_util.TensorFlowTestCase):
x2 = constant_op.constant(2.0, name="x2")
x3 = math_ops.multiply(x1, x2, name="x3")
- @function.Defun()
+ @function.defun()
def Outer():
outer1 = array_ops.identity(x1, name="outer1")
- @function.Defun()
+ @function.defun()
def Inner():
inner1 = array_ops.identity(outer1, name="inner1")
inner2 = array_ops.identity(x2, name="inner2")
@@ -511,11 +514,11 @@ class FunctionGradientsTest(test_util.TensorFlowTestCase):
with ops.Graph().as_default():
x = constant_op.constant(1.0, name="x")
- @function.Defun()
+ @function.defun()
def Outer():
y = math_ops.multiply(x, 2.0, name="y")
- @function.Defun()
+ @function.defun()
def Inner():
z = math_ops.multiply(y, 3.0, name="z")
g = gradients_impl.gradients(z, y)
diff --git a/tensorflow/python/ops/io_ops.py b/tensorflow/python/ops/io_ops.py
index fbc1350c61..f84785df2c 100644
--- a/tensorflow/python/ops/io_ops.py
+++ b/tensorflow/python/ops/io_ops.py
@@ -33,8 +33,9 @@ from tensorflow.python.ops import gen_io_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_io_ops import *
-from tensorflow.python.util.tf_export import tf_export
# pylint: enable=wildcard-import
+from tensorflow.python.util import deprecation
+from tensorflow.python.util.tf_export import tf_export
# pylint: disable=protected-access
@@ -95,7 +96,7 @@ def _restore_slice(file_pattern, tensor_name, shape_and_slice, tensor_type,
preferred_shard, name=name)
-@tf_export("ReaderBase")
+@tf_export(v1=["ReaderBase"])
class ReaderBase(object):
"""Base class for different Reader types, that produce a record every step.
@@ -309,7 +310,7 @@ ops.NotDifferentiable("ReaderRestoreState")
ops.NotDifferentiable("ReaderReset")
-@tf_export("WholeFileReader")
+@tf_export(v1=["WholeFileReader"])
class WholeFileReader(ReaderBase):
"""A Reader that outputs the entire contents of a file as a value.
@@ -324,6 +325,9 @@ class WholeFileReader(ReaderBase):
@end_compatibility
"""
+ @deprecation.deprecated(
+ None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
+ "`tf.data.Dataset.map(tf.read_file)`.")
def __init__(self, name=None):
"""Create a WholeFileReader.
@@ -337,7 +341,7 @@ class WholeFileReader(ReaderBase):
ops.NotDifferentiable("WholeFileReader")
-@tf_export("TextLineReader")
+@tf_export(v1=["TextLineReader"])
class TextLineReader(ReaderBase):
"""A Reader that outputs the lines of a file delimited by newlines.
@@ -351,6 +355,9 @@ class TextLineReader(ReaderBase):
"""
# TODO(josh11b): Support serializing and restoring state.
+ @deprecation.deprecated(
+ None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
+ "`tf.data.TextLineDataset`.")
def __init__(self, skip_header_lines=None, name=None):
"""Create a TextLineReader.
@@ -367,7 +374,7 @@ class TextLineReader(ReaderBase):
ops.NotDifferentiable("TextLineReader")
-@tf_export("FixedLengthRecordReader")
+@tf_export(v1=["FixedLengthRecordReader"])
class FixedLengthRecordReader(ReaderBase):
"""A Reader that outputs fixed-length records from a file.
@@ -380,6 +387,9 @@ class FixedLengthRecordReader(ReaderBase):
"""
# TODO(josh11b): Support serializing and restoring state.
+ @deprecation.deprecated(
+ None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
+ "`tf.data.FixedLengthRecordDataset`.")
def __init__(self,
record_bytes,
header_bytes=None,
@@ -410,7 +420,7 @@ class FixedLengthRecordReader(ReaderBase):
ops.NotDifferentiable("FixedLengthRecordReader")
-@tf_export("TFRecordReader")
+@tf_export(v1=["TFRecordReader"])
class TFRecordReader(ReaderBase):
"""A Reader that outputs the records from a TFRecords file.
@@ -423,6 +433,9 @@ class TFRecordReader(ReaderBase):
"""
# TODO(josh11b): Support serializing and restoring state.
+ @deprecation.deprecated(
+ None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
+ "`tf.data.TFRecordDataset`.")
def __init__(self, name=None, options=None):
"""Create a TFRecordReader.
@@ -441,7 +454,7 @@ class TFRecordReader(ReaderBase):
ops.NotDifferentiable("TFRecordReader")
-@tf_export("LMDBReader")
+@tf_export(v1=["LMDBReader"])
class LMDBReader(ReaderBase):
"""A Reader that outputs the records from a LMDB file.
@@ -452,6 +465,10 @@ class LMDBReader(ReaderBase):
use `tf.data` to get data into your model.
@end_compatibility
"""
+
+ @deprecation.deprecated(
+ None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
+ "`tf.contrib.data.LMDBDataset`.")
def __init__(self, name=None, options=None):
"""Create a LMDBReader.
@@ -459,6 +476,7 @@ class LMDBReader(ReaderBase):
name: A name for the operation (optional).
options: A LMDBRecordOptions object (optional).
"""
+ del options
rr = gen_io_ops.lmdb_reader(name=name)
super(LMDBReader, self).__init__(rr)
@@ -466,7 +484,7 @@ class LMDBReader(ReaderBase):
ops.NotDifferentiable("LMDBReader")
-@tf_export("IdentityReader")
+@tf_export(v1=["IdentityReader"])
class IdentityReader(ReaderBase):
"""A Reader that outputs the queued work as both the key and value.
@@ -481,6 +499,9 @@ class IdentityReader(ReaderBase):
@end_compatibility
"""
+ @deprecation.deprecated(
+ None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
+ "`tf.data.Dataset.map(...)`.")
def __init__(self, name=None):
"""Create a IdentityReader.
diff --git a/tensorflow/python/ops/nn_ops.py b/tensorflow/python/ops/nn_ops.py
index 474e0bb295..ef9afd9e8e 100644
--- a/tensorflow/python/ops/nn_ops.py
+++ b/tensorflow/python/ops/nn_ops.py
@@ -2454,7 +2454,7 @@ def conv1d(value,
returned to the caller.
Args:
- value: A 3D `Tensor`. Must be of type `float16` or `float32`.
+ value: A 3D `Tensor`. Must be of type `float16`, `float32`, or `float64`.
filters: A 3D `Tensor`. Must have the same type as `value`.
stride: An `integer`. The number of entries by which
the filter is moved right at each step.
diff --git a/tensorflow/python/ops/parallel_for/pfor.py b/tensorflow/python/ops/parallel_for/pfor.py
index 3c914f6ff6..f9153b6d7d 100644
--- a/tensorflow/python/ops/parallel_for/pfor.py
+++ b/tensorflow/python/ops/parallel_for/pfor.py
@@ -21,8 +21,6 @@ from __future__ import print_function
import collections
-from absl import flags
-
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
@@ -41,6 +39,7 @@ from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import tensor_array_ops
+from tensorflow.python.platform import flags
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
@@ -2013,6 +2012,7 @@ def _convert_biasaddgrad(pfor_input):
@RegisterPForWithArgs("ReluGrad")
@RegisterPForWithArgs("TanhGrad")
@RegisterPForWithArgs("SigmoidGrad")
+@RegisterPForWithArgs("SoftplusGrad")
def _convert_grads(pfor_input, op_type, *args, **kw_args):
del args
del kw_args
diff --git a/tensorflow/python/ops/script_ops.py b/tensorflow/python/ops/script_ops.py
index 8d66de6b20..2ec4b540fb 100644
--- a/tensorflow/python/ops/script_ops.py
+++ b/tensorflow/python/ops/script_ops.py
@@ -287,19 +287,19 @@ def _internal_py_func(func,
# TODO(akshayka): Implement higher-order derivatives.
@ops.RegisterGradient("EagerPyFunc")
-def _EagerPyFuncGrad(op, dy):
+def _EagerPyFuncGrad(op, *dy):
"""Computes the gradient of an EagerPyFunc."""
token = op.get_attr("token")
- def eagerly_executed_grad(dy):
+ def eagerly_executed_grad(*dy):
tape, eager_inputs, eager_outputs = tape_cache.pop(compat.as_bytes(token))
return tape.gradient(eager_outputs, eager_inputs, output_gradients=dy)
with ops.control_dependencies(op.outputs):
return _internal_py_func(
func=eagerly_executed_grad,
- inp=[dy] if isinstance(dy, ops.Tensor) else dy,
+ inp=dy,
Tout=[tensor.dtype for tensor in op.inputs],
eager=True,
is_grad_func=True)
diff --git a/tensorflow/python/tools/component_api_helper.py b/tensorflow/python/tools/component_api_helper.py
index 988ecc61f0..97f46719e5 100644
--- a/tensorflow/python/tools/component_api_helper.py
+++ b/tensorflow/python/tools/component_api_helper.py
@@ -65,9 +65,10 @@ def package_hook(parent_package_str, child_package_str, error_msg=None):
Will allow the following import statement to work.
>>> import parent.child
"""
- child_pkg_path = [os.path.join(os.path.dirname(child_pkg.__file__), "..")]
+ child_pkg_path = [os.path.abspath(
+ os.path.join(os.path.dirname(child_pkg.__file__), ".."))]
try:
- parent_pkg.__path__ += child_pkg_path
+ parent_pkg.__path__ = child_pkg_path + parent_pkg.__path__
except AttributeError:
parent_pkg.__path__ = child_pkg_path
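
The change above resolves the child package directory to an absolute path and prepends it, so the component package shadows any existing subpackage of the same name. A small sketch with hypothetical paths:

    import os

    class FakePackage(object):
      pass

    parent_pkg = FakePackage()
    parent_pkg.__path__ = ['/site-packages/parent']
    child_init = '/site-packages/parent_component/child/__init__.py'

    child_pkg_path = [os.path.abspath(
        os.path.join(os.path.dirname(child_init), '..'))]
    parent_pkg.__path__ = child_pkg_path + parent_pkg.__path__
    # ['/site-packages/parent_component', '/site-packages/parent']
    print(parent_pkg.__path__)
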
diff --git a/tensorflow/python/tools/print_selective_registration_header_test.py b/tensorflow/python/tools/print_selective_registration_header_test.py
index 4b3d98242c..cce8060fb9 100644
--- a/tensorflow/python/tools/print_selective_registration_header_test.py
+++ b/tensorflow/python/tools/print_selective_registration_header_test.py
@@ -59,6 +59,9 @@ GRAPH_DEF_TXT = """
}
"""
+# AccumulateNV2 is included because it should appear in the generated header
+# despite lacking a kernel (it's rewritten by AccumulateNV2RemovePass; see
+# core/common_runtime/accumulate_n_optimizer.cc).
GRAPH_DEF_TXT_2 = """
node: {
name: "node_4"
@@ -67,6 +70,12 @@ GRAPH_DEF_TXT_2 = """
device: "/cpu:0"
attr: { key: "T" value: { type: DT_FLOAT } }
}
+ node: {
+ name: "node_5"
+ op: "AccumulateNV2"
+ attr: { key: "T" value: { type: DT_INT32 } }
+ attr: { key : "N" value: { i: 3 } }
+ }
"""
@@ -100,6 +109,7 @@ class PrintOpFilegroupTest(test.TestCase):
self.assertListEqual(
[
+ ('AccumulateNV2', None), #
('BiasAdd', 'BiasOp<CPUDevice, float>'), #
('MatMul',
matmul_prefix + 'MatMulOp<CPUDevice, double, false >'), #
@@ -117,6 +127,7 @@ class PrintOpFilegroupTest(test.TestCase):
'rawproto', self.WriteGraphFiles(graphs), default_ops)
self.assertListEqual(
[
+ ('AccumulateNV2', None), #
('BiasAdd', 'BiasOp<CPUDevice, float>'), #
('MatMul',
matmul_prefix + 'MatMulOp<CPUDevice, double, false >'), #
@@ -196,6 +207,7 @@ class PrintOpFilegroupTest(test.TestCase):
constexpr inline bool ShouldRegisterOp(const char op[]) {
return false
+ || isequal(op, "AccumulateNV2")
|| isequal(op, "BiasAdd")
;
}
diff --git a/tensorflow/python/tools/selective_registration_header_lib.py b/tensorflow/python/tools/selective_registration_header_lib.py
index dc0612bb3f..b99c632c3e 100644
--- a/tensorflow/python/tools/selective_registration_header_lib.py
+++ b/tensorflow/python/tools/selective_registration_header_lib.py
@@ -32,6 +32,16 @@ from tensorflow.python import pywrap_tensorflow
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging
+# Usually, we use each graph node to induce registration of an op and
+# corresponding kernel; nodes without a corresponding kernel (perhaps due to
+# attr types) generate a warning but are otherwise ignored. Ops in this set are
+# registered even if there's no corresponding kernel.
+OPS_WITHOUT_KERNEL_WHITELIST = frozenset([
+ # AccumulateNV2 is rewritten away by AccumulateNV2RemovePass; see
+ # core/common_runtime/accumulate_n_optimizer.cc.
+ 'AccumulateNV2'
+])
+
def get_ops_and_kernels(proto_fileformat, proto_files, default_ops_str):
"""Gets the ops and kernels needed from the model files."""
@@ -53,8 +63,10 @@ def get_ops_and_kernels(proto_fileformat, proto_files, default_ops_str):
node_def.device = '/cpu:0'
kernel_class = pywrap_tensorflow.TryFindKernelClass(
node_def.SerializeToString())
- if kernel_class:
- op_and_kernel = (str(node_def.op), str(kernel_class.decode('utf-8')))
+ op = str(node_def.op)
+ if kernel_class or op in OPS_WITHOUT_KERNEL_WHITELIST:
+ op_and_kernel = (op, str(kernel_class.decode('utf-8'))
+ if kernel_class else None)
if op_and_kernel not in ops:
ops.add(op_and_kernel)
else:
@@ -129,6 +141,7 @@ def get_header_from_ops_and_kernels(ops_and_kernels,
'''
line += 'constexpr const char* kNecessaryOpKernelClasses[] = {\n'
for _, kernel_class in ops_and_kernels:
+ if kernel_class is None: continue
line += '"%s",\n' % kernel_class
line += '};'
append(line)
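
With the whitelist above, ops without kernels show up as (op, None) pairs; the header generator registers the op name but skips the kernel-class list entry. An illustrative sketch of that filtering:

    ops_and_kernels = [
        ('AccumulateNV2', None),  # whitelisted; no kernel class
        ('BiasAdd', 'BiasOp<CPUDevice, float>'),
    ]
    op_names = sorted(op for op, _ in ops_and_kernels)
    kernel_classes = [k for _, k in ops_and_kernels if k is not None]
    print(op_names)        # ['AccumulateNV2', 'BiasAdd']
    print(kernel_classes)  # ['BiasOp<CPUDevice, float>']
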
diff --git a/tensorflow/python/training/checkpoint_ops.py b/tensorflow/python/training/checkpoint_ops.py
index a6e9662b73..cfd9b39ddc 100644
--- a/tensorflow/python/training/checkpoint_ops.py
+++ b/tensorflow/python/training/checkpoint_ops.py
@@ -268,7 +268,8 @@ def _load_and_remap_matrix_initializer(ckpt_path,
vocab files are the same, and no column remapping is done.
The returned initializer only supports div-partitioning along the row axis. It
- does not support partitioning along the column axis or mod-partitioning.
+ does not support partitioning along the column axis (as this is not common in
+ practice) or mod-partitioning.
NOTE: When this is used to warm-start variables, client code should use
`tf.lookup.index_table_from_tensor()` like
diff --git a/tensorflow/python/training/input.py b/tensorflow/python/training/input.py
index 0d6207f8c4..9d9db70890 100644
--- a/tensorflow/python/training/input.py
+++ b/tensorflow/python/training/input.py
@@ -45,6 +45,7 @@ from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.summary import summary
from tensorflow.python.training import queue_runner
+from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@@ -75,7 +76,10 @@ def match_filenames_once(pattern, name=None):
collections=[ops.GraphKeys.LOCAL_VARIABLES])
-@tf_export("train.limit_epochs")
+@tf_export(v1=["train.limit_epochs"])
+@deprecation.deprecated(
+ None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
+ "`tf.data.Dataset.from_tensors(tensor).repeat(num_epochs)`.")
def limit_epochs(tensor, num_epochs=None, name=None):
"""Returns tensor `num_epochs` times and then raises an `OutOfRange` error.
@@ -108,7 +112,12 @@ def limit_epochs(tensor, num_epochs=None, name=None):
return array_ops.identity(tensor, name=name)
-@tf_export("train.input_producer")
+@tf_export(v1=["train.input_producer"])
+@deprecation.deprecated(
+ None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
+ "`tf.data.Dataset.from_tensor_slices(input_tensor).shuffle"
+ "(tf.shape(input_tensor, out_type=tf.int64)[0]).repeat(num_epochs)`. If "
+ "`shuffle=False`, omit the `.shuffle(...)`.")
def input_producer(input_tensor,
element_shape=None,
num_epochs=None,
@@ -191,7 +200,12 @@ def input_producer(input_tensor,
return q
-@tf_export("train.string_input_producer")
+@tf_export(v1=["train.string_input_producer"])
+@deprecation.deprecated(
+ None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
+ "`tf.data.Dataset.from_tensor_slices(string_tensor).shuffle"
+ "(tf.shape(input_tensor, out_type=tf.int64)[0]).repeat(num_epochs)`. If "
+ "`shuffle=False`, omit the `.shuffle(...)`.")
def string_input_producer(string_tensor,
num_epochs=None,
shuffle=True,
@@ -261,7 +275,11 @@ def string_input_producer(string_tensor,
cancel_op=cancel_op)
-@tf_export("train.range_input_producer")
+@tf_export(v1=["train.range_input_producer"])
+@deprecation.deprecated(
+ None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
+ "`tf.data.Dataset.range(limit).shuffle(limit).repeat(num_epochs)`. If "
+ "`shuffle=False`, omit the `.shuffle(...)`.")
def range_input_producer(limit, num_epochs=None, shuffle=True, seed=None,
capacity=32, shared_name=None, name=None):
"""Produces the integers from 0 to limit-1 in a queue.
@@ -299,7 +317,12 @@ def range_input_producer(limit, num_epochs=None, shuffle=True, seed=None,
shared_name, "fraction_of_%d_full" % capacity, name)
-@tf_export("train.slice_input_producer")
+@tf_export(v1=["train.slice_input_producer"])
+@deprecation.deprecated(
+ None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
+ "`tf.data.Dataset.from_tensor_slices(tuple(tensor_list)).shuffle"
+ "(tf.shape(input_tensor, out_type=tf.int64)[0]).repeat(num_epochs)`. If "
+ "`shuffle=False`, omit the `.shuffle(...)`.")
def slice_input_producer(tensor_list, num_epochs=None, shuffle=True, seed=None,
capacity=32, shared_name=None, name=None):
"""Produces a slice of each `Tensor` in `tensor_list`.
@@ -894,7 +917,11 @@ def _shuffle_batch_join(tensors_list, batch_size, capacity,
# Batching functions ----------------------------------------------------------
-@tf_export("train.batch")
+@tf_export(v1=["train.batch"])
+@deprecation.deprecated(
+ None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
+ "`tf.data.Dataset.batch(batch_size)` (or `padded_batch(...)` if "
+ "`dynamic_pad=True`).")
def batch(tensors, batch_size, num_threads=1, capacity=32,
enqueue_many=False, shapes=None, dynamic_pad=False,
allow_smaller_final_batch=False, shared_name=None, name=None):
@@ -989,7 +1016,11 @@ def batch(tensors, batch_size, num_threads=1, capacity=32,
name=name)
-@tf_export("train.maybe_batch")
+@tf_export(v1=["train.maybe_batch"])
+@deprecation.deprecated(
+ None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
+ "`tf.data.Dataset.filter(...).batch(batch_size)` (or `padded_batch(...)`"
+ " if `dynamic_pad=True`).")
def maybe_batch(tensors, keep_input, batch_size, num_threads=1, capacity=32,
enqueue_many=False, shapes=None, dynamic_pad=False,
allow_smaller_final_batch=False, shared_name=None, name=None):
@@ -1042,7 +1073,11 @@ def maybe_batch(tensors, keep_input, batch_size, num_threads=1, capacity=32,
name=name)
-@tf_export("train.batch_join")
+@tf_export(v1=["train.batch_join"])
+@deprecation.deprecated(
+ None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
+ "`tf.data.Dataset.interleave(...).batch(batch_size)` (or "
+ "`padded_batch(...)` if `dynamic_pad=True`).")
def batch_join(tensors_list, batch_size, capacity=32, enqueue_many=False,
shapes=None, dynamic_pad=False, allow_smaller_final_batch=False,
shared_name=None, name=None):
@@ -1148,7 +1183,11 @@ def batch_join(tensors_list, batch_size, capacity=32, enqueue_many=False,
name=name)
-@tf_export("train.maybe_batch_join")
+@tf_export(v1=["train.maybe_batch_join"])
+@deprecation.deprecated(
+ None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
+ "`tf.data.Dataset.interleave(...).filter(...).batch(batch_size)` (or "
+ "`padded_batch(...)` if `dynamic_pad=True`).")
def maybe_batch_join(tensors_list, keep_input, batch_size, capacity=32,
enqueue_many=False, shapes=None, dynamic_pad=False,
allow_smaller_final_batch=False, shared_name=None,
@@ -1201,7 +1240,10 @@ def maybe_batch_join(tensors_list, keep_input, batch_size, capacity=32,
name=name)
-@tf_export("train.shuffle_batch")
+@tf_export(v1=["train.shuffle_batch"])
+@deprecation.deprecated(
+ None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
+ "`tf.data.Dataset.shuffle(min_after_dequeue).batch(batch_size)`.")
def shuffle_batch(tensors, batch_size, capacity, min_after_dequeue,
num_threads=1, seed=None, enqueue_many=False, shapes=None,
allow_smaller_final_batch=False, shared_name=None, name=None):
@@ -1301,7 +1343,11 @@ def shuffle_batch(tensors, batch_size, capacity, min_after_dequeue,
name=name)
-@tf_export("train.maybe_shuffle_batch")
+@tf_export(v1=["train.maybe_shuffle_batch"])
+@deprecation.deprecated(
+ None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
+ "`tf.data.Dataset.filter(...).shuffle(min_after_dequeue).batch(batch_size)`"
+ ".")
def maybe_shuffle_batch(tensors, batch_size, capacity, min_after_dequeue,
keep_input, num_threads=1, seed=None,
enqueue_many=False, shapes=None,
@@ -1361,7 +1407,11 @@ def maybe_shuffle_batch(tensors, batch_size, capacity, min_after_dequeue,
name=name)
-@tf_export("train.shuffle_batch_join")
+@tf_export(v1=["train.shuffle_batch_join"])
+@deprecation.deprecated(
+ None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
+ "`tf.data.Dataset.interleave(...).shuffle(min_after_dequeue).batch"
+ "(batch_size)`.")
def shuffle_batch_join(tensors_list, batch_size, capacity,
min_after_dequeue, seed=None, enqueue_many=False,
shapes=None, allow_smaller_final_batch=False,
@@ -1455,7 +1505,11 @@ def shuffle_batch_join(tensors_list, batch_size, capacity,
name=name)
-@tf_export("train.maybe_shuffle_batch_join")
+@tf_export(v1=["train.maybe_shuffle_batch_join"])
+@deprecation.deprecated(
+ None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
+ "`tf.data.Dataset.interleave(...).filter(...).shuffle(min_after_dequeue)"
+ ".batch(batch_size)`.")
def maybe_shuffle_batch_join(tensors_list, batch_size, capacity,
min_after_dequeue, keep_input, seed=None,
enqueue_many=False, shapes=None,
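
The batching helpers above are likewise deprecated in favor of tf.data. A short illustrative migration (dataset, batch_size and min_after_dequeue are hypothetical):

    import tensorflow as tf

    dataset = tf.data.Dataset.range(1000)
    batch_size = 32
    min_after_dequeue = 100
    # tf.train.batch(...)          -> dataset.batch(batch_size)
    batched = dataset.batch(batch_size)
    # tf.train.shuffle_batch(...)  -> dataset.shuffle(...).batch(...)
    shuffled = dataset.shuffle(min_after_dequeue).batch(batch_size)
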
diff --git a/tensorflow/python/training/learning_rate_decay.py b/tensorflow/python/training/learning_rate_decay.py
index fd195a7965..29b5465321 100644
--- a/tensorflow/python/training/learning_rate_decay.py
+++ b/tensorflow/python/training/learning_rate_decay.py
@@ -17,19 +17,12 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import math
-
from tensorflow.python.eager import context
-from tensorflow.python.framework import constant_op
-from tensorflow.python.framework import dtypes
-from tensorflow.python.framework import ops
-from tensorflow.python.ops import control_flow_ops
-from tensorflow.python.ops import math_ops
-from tensorflow.python.ops import random_ops
+from tensorflow.python.training import learning_rate_decay_v2
from tensorflow.python.util.tf_export import tf_export
-@tf_export("train.exponential_decay")
+@tf_export(v1=["train.exponential_decay"])
def exponential_decay(learning_rate,
global_step,
decay_steps,
@@ -95,32 +88,19 @@ def exponential_decay(learning_rate,
the learning rate value across different invocations of optimizer functions.
@end_compatibility
"""
- if global_step is None:
- raise ValueError("global_step is required for exponential_decay.")
- with ops.name_scope(
- name, "ExponentialDecay",
- [learning_rate, global_step, decay_steps, decay_rate]) as name:
- learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
- dtype = learning_rate.dtype
- decay_steps = math_ops.cast(decay_steps, dtype)
- decay_rate = math_ops.cast(decay_rate, dtype)
-
- def decayed_lr():
- """Helper to recompute learning rate; most helpful in eager-mode."""
- global_step_recomp = math_ops.cast(global_step, dtype)
- p = global_step_recomp / decay_steps
- if staircase:
- p = math_ops.floor(p)
- return math_ops.multiply(
- learning_rate, math_ops.pow(decay_rate, p), name=name)
-
- if not context.executing_eagerly():
- decayed_lr = decayed_lr()
-
- return decayed_lr
-
-
-@tf_export("train.piecewise_constant")
+ decayed_lr = learning_rate_decay_v2.exponential_decay(learning_rate,
+ global_step,
+ decay_steps,
+ decay_rate,
+ staircase=staircase,
+ name=name)
+ if not context.executing_eagerly():
+ decayed_lr = decayed_lr()
+
+ return decayed_lr
+
+
+@tf_export(v1=["train.piecewise_constant"])
def piecewise_constant(x, boundaries, values, name=None):
"""Piecewise constant from boundaries and interval values.
@@ -163,58 +143,15 @@ def piecewise_constant(x, boundaries, values, name=None):
the learning rate value across different invocations of optimizer functions.
@end_compatibility
"""
- if len(boundaries) != len(values) - 1:
- raise ValueError(
- "The length of boundaries should be 1 less than the length of values")
- with ops.name_scope(name, "PiecewiseConstant",
- [x, boundaries, values, name]) as name:
- boundaries = ops.convert_n_to_tensor(boundaries)
- values = ops.convert_n_to_tensor(values)
-
- def decayed_lr():
- """Helper to recompute learning rate; most helpful in eager-mode."""
- x_recomp = ops.convert_to_tensor(x)
- # Avoid explicit conversion to x's dtype. This could result in faulty
- # comparisons, for example if floats are converted to integers.
- for i, b in enumerate(boundaries):
- if b.dtype.base_dtype != x_recomp.dtype.base_dtype:
- # We can promote int32 boundaries to int64 without loss of precision.
- # This covers the most common case where the user passes in boundaries
- # as an array of Python integers.
- if (b.dtype.base_dtype == dtypes.int32 and
- x_recomp.dtype.base_dtype == dtypes.int64):
- b = math_ops.cast(b, x_recomp.dtype.base_dtype)
- boundaries[i] = b
- else:
- raise ValueError(
- "Boundaries (%s) must have the same dtype as x (%s)." %
- (b.dtype.base_dtype, x_recomp.dtype.base_dtype))
- # TODO(rdipietro): Ensure that boundaries' elements strictly increases.
- for v in values[1:]:
- if v.dtype.base_dtype != values[0].dtype.base_dtype:
- raise ValueError(
- "Values must have elements all with the same dtype (%s vs %s)." %
- (values[0].dtype.base_dtype, v.dtype.base_dtype))
- pred_fn_pairs = []
- pred_fn_pairs.append((x_recomp <= boundaries[0], lambda: values[0]))
- pred_fn_pairs.append((x_recomp > boundaries[-1], lambda: values[-1]))
- for low, high, v in zip(boundaries[:-1], boundaries[1:], values[1:-1]):
- # Need to bind v here; can do this with lambda v=v: ...
- pred = (x_recomp > low) & (x_recomp <= high)
- pred_fn_pairs.append((pred, lambda v=v: v))
-
- # The default isn't needed here because our conditions are mutually
- # exclusive and exhaustive, but tf.case requires it.
- default = lambda: values[0]
- return control_flow_ops.case(pred_fn_pairs, default, exclusive=True)
-
- if not context.executing_eagerly():
- decayed_lr = decayed_lr()
-
- return decayed_lr
-
-
-@tf_export("train.polynomial_decay")
+ decayed_lr = learning_rate_decay_v2.piecewise_constant(x, boundaries, values,
+ name=name)
+ if not context.executing_eagerly():
+ decayed_lr = decayed_lr()
+
+ return decayed_lr
+
+
+@tf_export(v1=["train.polynomial_decay"])
def polynomial_decay(learning_rate,
global_step,
decay_steps,
@@ -299,46 +236,22 @@ def polynomial_decay(learning_rate,
the learning rate value across different invocations of optimizer functions.
@end_compatibility
"""
- if global_step is None:
- raise ValueError("global_step is required for polynomial_decay.")
- with ops.name_scope(
- name, "PolynomialDecay",
- [learning_rate, global_step, decay_steps, end_learning_rate, power
- ]) as name:
- learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
- dtype = learning_rate.dtype
- end_learning_rate = math_ops.cast(end_learning_rate, dtype)
- power = math_ops.cast(power, dtype)
-
- def decayed_lr():
- """Helper to recompute learning rate; most helpful in eager-mode."""
- global_step_recomp = math_ops.cast(global_step, dtype)
- decay_steps_recomp = math_ops.cast(decay_steps, dtype)
- if cycle:
- # Find the first multiple of decay_steps that is bigger than
- # global_step. If global_step is zero set the multiplier to 1
- multiplier = control_flow_ops.cond(
- math_ops.equal(global_step_recomp, 0), lambda: 1.0,
- lambda: math_ops.ceil(global_step_recomp / decay_steps))
- decay_steps_recomp = math_ops.multiply(decay_steps_recomp, multiplier)
- else:
- # Make sure that the global_step used is not bigger than decay_steps.
- global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)
-
- p = math_ops.div(global_step_recomp, decay_steps_recomp)
- return math_ops.add(
- math_ops.multiply(learning_rate - end_learning_rate,
- math_ops.pow(1 - p, power)),
- end_learning_rate,
- name=name)
-
- if not context.executing_eagerly():
- decayed_lr = decayed_lr()
-
- return decayed_lr
-
-
-@tf_export("train.natural_exp_decay")
+ decayed_lr = learning_rate_decay_v2.polynomial_decay(
+ learning_rate,
+ global_step,
+ decay_steps,
+ end_learning_rate=end_learning_rate,
+ power=power,
+ cycle=cycle,
+ name=name)
+
+ if not context.executing_eagerly():
+ decayed_lr = decayed_lr()
+
+ return decayed_lr
+
+
+@tf_export(v1=["train.natural_exp_decay"])
def natural_exp_decay(learning_rate,
global_step,
decay_steps,
@@ -410,32 +323,17 @@ def natural_exp_decay(learning_rate,
the learning rate value across different invocations of optimizer functions.
@end_compatibility
"""
- if global_step is None:
- raise ValueError("global_step is required for natural_exp_decay.")
- with ops.name_scope(name, "NaturalExpDecay",
- [learning_rate, global_step, decay_rate]) as name:
- learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
- dtype = learning_rate.dtype
- decay_steps = math_ops.cast(decay_steps, dtype)
- decay_rate = math_ops.cast(decay_rate, dtype)
-
- def decayed_lr():
- """Helper to recompute learning rate; most helpful in eager-mode."""
- global_step_recomp = math_ops.cast(global_step, dtype)
- p = global_step_recomp / decay_steps
- if staircase:
- p = math_ops.floor(p)
- exponent = math_ops.exp(
- math_ops.multiply(math_ops.negative(decay_rate), p))
- return math_ops.multiply(learning_rate, exponent, name=name)
-
- if not context.executing_eagerly():
- decayed_lr = decayed_lr()
-
- return decayed_lr
-
-
-@tf_export("train.inverse_time_decay")
+ decayed_lr = learning_rate_decay_v2.natural_exp_decay(
+ learning_rate, global_step, decay_steps, decay_rate, staircase=staircase,
+ name=name)
+
+ if not context.executing_eagerly():
+ decayed_lr = decayed_lr()
+
+ return decayed_lr
+
+
+@tf_export(v1=["train.inverse_time_decay"])
def inverse_time_decay(learning_rate,
global_step,
decay_steps,
@@ -507,32 +405,21 @@ def inverse_time_decay(learning_rate,
the learning rate value across different invocations of optimizer functions.
@end_compatibility
"""
- if global_step is None:
- raise ValueError("global_step is required for inverse_time_decay.")
- with ops.name_scope(name, "InverseTimeDecay",
- [learning_rate, global_step, decay_rate]) as name:
- learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
- dtype = learning_rate.dtype
- decay_steps = math_ops.cast(decay_steps, dtype)
- decay_rate = math_ops.cast(decay_rate, dtype)
-
- def decayed_lr():
- """Helper to recompute learning rate; most helpful in eager-mode."""
- global_step_recomp = math_ops.cast(global_step, dtype)
- p = global_step_recomp / decay_steps
- if staircase:
- p = math_ops.floor(p)
- const = math_ops.cast(constant_op.constant(1), dtype)
- denom = math_ops.add(const, math_ops.multiply(decay_rate, p))
- return math_ops.div(learning_rate, denom, name=name)
-
- if not context.executing_eagerly():
- decayed_lr = decayed_lr()
-
- return decayed_lr
-
-
-@tf_export("train.cosine_decay")
+ decayed_lr = learning_rate_decay_v2.inverse_time_decay(
+ learning_rate,
+ global_step,
+ decay_steps,
+ decay_rate,
+ staircase=staircase,
+ name=name)
+
+ if not context.executing_eagerly():
+ decayed_lr = decayed_lr()
+
+ return decayed_lr
+
+
+@tf_export(v1=["train.cosine_decay"])
def cosine_decay(learning_rate, global_step, decay_steps, alpha=0.0, name=None):
"""Applies cosine decay to the learning rate.
@@ -581,32 +468,16 @@ def cosine_decay(learning_rate, global_step, decay_steps, alpha=0.0, name=None):
the learning rate value across different invocations of optimizer functions.
@end_compatibility
"""
- if global_step is None:
- raise ValueError("cosine decay requires global_step")
- with ops.name_scope(name, "CosineDecay",
- [learning_rate, global_step]) as name:
- learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
- dtype = learning_rate.dtype
- decay_steps = math_ops.cast(decay_steps, dtype)
-
- def decayed_lr():
- """Helper to recompute learning rate; most helpful in eager-mode."""
- global_step_recomp = math_ops.cast(global_step, dtype)
- global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)
- completed_fraction = global_step_recomp / decay_steps
- cosine_decayed = 0.5 * (1.0 + math_ops.cos(
- constant_op.constant(math.pi) * completed_fraction))
-
- decayed = (1 - alpha) * cosine_decayed + alpha
- return math_ops.multiply(learning_rate, decayed)
+ decayed_lr = learning_rate_decay_v2.cosine_decay(
+ learning_rate, global_step, decay_steps, alpha=alpha, name=name)
- if not context.executing_eagerly():
- decayed_lr = decayed_lr()
+ if not context.executing_eagerly():
+ decayed_lr = decayed_lr()
- return decayed_lr
+ return decayed_lr
-@tf_export("train.cosine_decay_restarts")
+@tf_export(v1=["train.cosine_decay_restarts"])
def cosine_decay_restarts(learning_rate,
global_step,
first_decay_steps,
@@ -664,57 +535,22 @@ def cosine_decay_restarts(learning_rate,
the learning rate value across different invocations of optimizer functions.
@end_compatibility
"""
- if global_step is None:
- raise ValueError("cosine decay restarts requires global_step")
- with ops.name_scope(name, "SGDRDecay", [learning_rate, global_step]) as name:
- learning_rate = ops.convert_to_tensor(
- learning_rate, name="initial_learning_rate")
- dtype = learning_rate.dtype
- first_decay_steps = math_ops.cast(first_decay_steps, dtype)
- alpha = math_ops.cast(alpha, dtype)
- t_mul = math_ops.cast(t_mul, dtype)
- m_mul = math_ops.cast(m_mul, dtype)
-
- def decayed_lr():
- """Helper to recompute learning rate; most helpful in eager-mode."""
- global_step_recomp = math_ops.cast(global_step, dtype)
- completed_fraction = global_step_recomp / first_decay_steps
-
- def compute_step(completed_fraction, geometric=False):
- """Helper for `cond` operation."""
- if geometric:
- i_restart = math_ops.floor(
- math_ops.log(1.0 - completed_fraction * (1.0 - t_mul)) /
- math_ops.log(t_mul))
-
- sum_r = (1.0 - t_mul**i_restart) / (1.0 - t_mul)
- completed_fraction = (completed_fraction - sum_r) / t_mul**i_restart
-
- else:
- i_restart = math_ops.floor(completed_fraction)
- completed_fraction -= i_restart
+ decayed_lr = learning_rate_decay_v2.cosine_decay_restarts(
+ learning_rate,
+ global_step,
+ first_decay_steps,
+ t_mul=t_mul,
+ m_mul=m_mul,
+ alpha=alpha,
+ name=name)
- return i_restart, completed_fraction
+ if not context.executing_eagerly():
+ decayed_lr = decayed_lr()
- i_restart, completed_fraction = control_flow_ops.cond(
- math_ops.equal(t_mul, 1.0),
- lambda: compute_step(completed_fraction, geometric=False),
- lambda: compute_step(completed_fraction, geometric=True))
+ return decayed_lr
- m_fac = m_mul**i_restart
- cosine_decayed = 0.5 * m_fac * (1.0 + math_ops.cos(
- constant_op.constant(math.pi) * completed_fraction))
- decayed = (1 - alpha) * cosine_decayed + alpha
- return math_ops.multiply(learning_rate, decayed, name=name)
-
- if not context.executing_eagerly():
- decayed_lr = decayed_lr()
-
- return decayed_lr
-
-
-@tf_export("train.linear_cosine_decay")
+@tf_export(v1=["train.linear_cosine_decay"])
def linear_cosine_decay(learning_rate,
global_step,
decay_steps,
@@ -781,37 +617,22 @@ def linear_cosine_decay(learning_rate,
the learning rate value across different invocations of optimizer functions.
@end_compatibility
"""
- if global_step is None:
- raise ValueError("linear cosine decay requires global_step")
- with ops.name_scope(name, "LinearCosineDecay",
- [learning_rate, global_step]) as name:
- learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
- dtype = learning_rate.dtype
- decay_steps = math_ops.cast(decay_steps, dtype)
- num_periods = math_ops.cast(num_periods, dtype)
- alpha = math_ops.cast(alpha, dtype)
- beta = math_ops.cast(beta, dtype)
-
- def decayed_lr():
- """Helper to recompute learning rate; most helpful in eager-mode."""
- global_step_recomp = math_ops.cast(global_step, dtype)
- global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)
- linear_decayed = (decay_steps - global_step_recomp) / decay_steps
- completed_fraction = global_step_recomp / decay_steps
- fraction = 2.0 * num_periods * completed_fraction
- cosine_decayed = 0.5 * (
- 1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction))
-
- linear_cosine_decayed = (alpha + linear_decayed) * cosine_decayed + beta
- return math_ops.multiply(learning_rate, linear_cosine_decayed, name=name)
-
- if not context.executing_eagerly():
- decayed_lr = decayed_lr()
-
- return decayed_lr
-
-
-@tf_export("train.noisy_linear_cosine_decay")
+ decayed_lr = learning_rate_decay_v2.linear_cosine_decay(
+ learning_rate,
+ global_step,
+ decay_steps,
+ num_periods=num_periods,
+ alpha=alpha,
+ beta=beta,
+ name=name)
+
+ if not context.executing_eagerly():
+ decayed_lr = decayed_lr()
+
+ return decayed_lr
+
+
+@tf_export(v1=["train.noisy_linear_cosine_decay"])
def noisy_linear_cosine_decay(learning_rate,
global_step,
decay_steps,
@@ -886,42 +707,17 @@ def noisy_linear_cosine_decay(learning_rate,
the learning rate value across different invocations of optimizer functions.
@end_compatibility
"""
- if global_step is None:
- raise ValueError("noisy linear cosine decay requires global_step")
- with ops.name_scope(name, "NoisyLinearCosineDecay",
- [learning_rate, global_step]) as name:
- learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
- dtype = learning_rate.dtype
- decay_steps = math_ops.cast(decay_steps, dtype)
- initial_variance = math_ops.cast(initial_variance, dtype)
- variance_decay = math_ops.cast(variance_decay, dtype)
- num_periods = math_ops.cast(num_periods, dtype)
- alpha = math_ops.cast(alpha, dtype)
- beta = math_ops.cast(beta, dtype)
-
- def decayed_lr():
- """Helper to recompute learning rate; most helpful in eager-mode."""
- global_step_recomp = math_ops.cast(global_step, dtype)
- global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)
- linear_decayed = (decay_steps - global_step_recomp) / decay_steps
- variance = initial_variance / (
- math_ops.pow(1.0 + global_step_recomp, variance_decay))
- std = math_ops.sqrt(variance)
- noisy_linear_decayed = (
- linear_decayed + random_ops.random_normal(
- linear_decayed.shape, stddev=std))
-
- completed_fraction = global_step_recomp / decay_steps
- fraction = 2.0 * num_periods * completed_fraction
- cosine_decayed = 0.5 * (
- 1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction))
- noisy_linear_cosine_decayed = (
- (alpha + noisy_linear_decayed) * cosine_decayed + beta)
-
- return math_ops.multiply(
- learning_rate, noisy_linear_cosine_decayed, name=name)
-
- if not context.executing_eagerly():
- decayed_lr = decayed_lr()
-
- return decayed_lr
+ decayed_lr = learning_rate_decay_v2.noisy_linear_cosine_decay(
+ learning_rate, global_step,
+ decay_steps,
+ initial_variance=initial_variance,
+ variance_decay=variance_decay,
+ num_periods=num_periods,
+ alpha=alpha,
+ beta=beta,
+ name=name)
+
+ if not context.executing_eagerly():
+ decayed_lr = decayed_lr()
+
+ return decayed_lr
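
All of the hunks above convert the v1 decay wrappers to the same delegation pattern. Below is a minimal, illustrative sketch of that pattern (not part of the patch); the wrapper name `exponential_decay_v1_style` is hypothetical, while `learning_rate_decay_v2.exponential_decay` and `context.executing_eagerly` are the calls the diff itself relies on.

```python
# Sketch of the v1 wrapper pattern introduced above (illustrative only).
from tensorflow.python.eager import context
from tensorflow.python.training import learning_rate_decay_v2


def exponential_decay_v1_style(learning_rate, global_step, decay_steps,
                               decay_rate, staircase=False, name=None):
  # Delegate the computation to the v2 module, which returns a no-arg callable.
  decayed_lr = learning_rate_decay_v2.exponential_decay(
      learning_rate, global_step, decay_steps, decay_rate,
      staircase=staircase, name=name)
  if not context.executing_eagerly():
    # In graph mode, invoke the callable once so callers keep receiving a
    # scalar Tensor, preserving the existing v1 contract.
    decayed_lr = decayed_lr()
  return decayed_lr
```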
diff --git a/tensorflow/python/training/learning_rate_decay_v2.py b/tensorflow/python/training/learning_rate_decay_v2.py
new file mode 100644
index 0000000000..9c5e144be6
--- /dev/null
+++ b/tensorflow/python/training/learning_rate_decay_v2.py
@@ -0,0 +1,898 @@
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Various learning rate decay functions."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import functools
+import math
+
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import random_ops
+from tensorflow.python.util.tf_export import tf_export
+
+
+@tf_export("train.exponential_decay", v1=[])
+def exponential_decay(learning_rate,
+ global_step,
+ decay_steps,
+ decay_rate,
+ staircase=False,
+ name=None):
+ """Applies exponential decay to the learning rate.
+
+ When training a model, it is often recommended to lower the learning rate as
+ the training progresses. This function applies an exponential decay function
+ to a provided initial learning rate. It requires a `global_step` value to
+ compute the decayed learning rate. You can just pass a TensorFlow variable
+ that you increment at each training step.
+
+ The function returns a no-arg function that produces the decayed learning
+ rate. This can be useful for changing the learning rate value across
+ different invocations of optimizer functions.
+ It is computed as:
+
+ ```python
+ decayed_learning_rate = learning_rate *
+ decay_rate ^ (global_step / decay_steps)
+ ```
+
+ If the argument `staircase` is `True`, then `global_step / decay_steps` is an
+ integer division and the decayed learning rate follows a staircase function.
+
+ Example: decay every 100000 steps with a base of 0.96:
+
+ ```python
+ ...
+ global_step = tf.Variable(0, trainable=False)
+ starter_learning_rate = 0.1
+ learning_rate_fn = tf.train.exponential_decay(starter_learning_rate,
+ global_step, 100000, 0.96,
+ staircase=True)
+ # Passing global_step to minimize() will increment it at each step.
+ learning_step = (
+ tf.train.GradientDescentOptimizer(learning_rate_fn)
+ .minimize(...my loss..., global_step=global_step)
+ )
+ ```
+
+ Args:
+ learning_rate: A scalar `float32` or `float64` `Tensor` or a
+ Python number. The initial learning rate.
+ global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
+ Global step to use for the decay computation. Must not be negative.
+ decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
+ Must be positive. See the decay computation above.
+ decay_rate: A scalar `float32` or `float64` `Tensor` or a
+ Python number. The decay rate.
+    staircase: Boolean. If `True`, decay the learning rate at discrete
+      intervals.
+ name: String. Optional name of the operation. Defaults to
+ 'ExponentialDecay'.
+
+ Returns:
+ A no-arg function that outputs the decayed learning rate, a scalar `Tensor`
+ of the same type as `learning_rate`.
+
+ Raises:
+ ValueError: if `global_step` is not supplied.
+ """
+ if global_step is None:
+ raise ValueError("global_step is required for exponential_decay.")
+ def decayed_lr(learning_rate, global_step, decay_steps, decay_rate,
+ staircase, name):
+ """Helper to recompute learning rate; most helpful in eager-mode."""
+ with ops.name_scope(
+ name, "ExponentialDecay",
+ [learning_rate, global_step, decay_steps, decay_rate]) as name:
+ learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
+ dtype = learning_rate.dtype
+ decay_steps = math_ops.cast(decay_steps, dtype)
+ decay_rate = math_ops.cast(decay_rate, dtype)
+
+ global_step_recomp = math_ops.cast(global_step, dtype)
+ p = global_step_recomp / decay_steps
+ if staircase:
+ p = math_ops.floor(p)
+ return math_ops.multiply(
+ learning_rate, math_ops.pow(decay_rate, p), name=name)
+
+ return functools.partial(decayed_lr, learning_rate, global_step, decay_steps,
+ decay_rate, staircase, name)
+
+
+@tf_export("train.piecewise_constant", v1=[])
+def piecewise_constant(x, boundaries, values, name=None):
+ """Piecewise constant from boundaries and interval values.
+
+ This function returns a no-arg callable to compute the piecewise constant.
+ This can be useful for changing the learning rate value across
+ different invocations of optimizer functions.
+
+ Example: use a learning rate that's 1.0 for the first 100001 steps, 0.5
+ for the next 10000 steps, and 0.1 for any additional steps.
+
+ ```python
+ global_step = tf.Variable(0, trainable=False)
+ boundaries = [100000, 110000]
+ values = [1.0, 0.5, 0.1]
+ learning_rate_fn = tf.train.piecewise_constant(global_step, boundaries,
+ values)
+ learning_rate = learning_rate_fn()
+
+ # Later, whenever we perform an optimization step, we increment global_step.
+ ```
+
+ Args:
+ x: A 0-D scalar `Tensor`. Must be one of the following types: `float32`,
+ `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`.
+ boundaries: A list of `Tensor`s or `int`s or `float`s with strictly
+ increasing entries, and with all elements having the same type as `x`.
+ values: A list of `Tensor`s or `float`s or `int`s that specifies the values
+ for the intervals defined by `boundaries`. It should have one more element
+ than `boundaries`, and all elements should have the same type.
+ name: A string. Optional name of the operation. Defaults to
+ 'PiecewiseConstant'.
+
+ Returns:
+ A no-arg function that outputs a 0-D Tensor. The output of the no-arg
+ function is `values[0]` when `x <= boundaries[0]`,
+ `values[1]` when `x > boundaries[0]` and `x <= boundaries[1]`, ...,
+    and `values[-1]` when `x > boundaries[-1]`.
+
+ Raises:
+ ValueError: if types of `x` and `boundaries` do not match, or types of all
+ `values` do not match or
+ the number of elements in the lists does not match.
+ """
+ if len(boundaries) != len(values) - 1:
+ raise ValueError(
+ "The length of boundaries should be 1 less than the length of values")
+ def decayed_lr(x, boundaries, values, name):
+ """Helper to recompute learning rate; most helpful in eager-mode."""
+ with ops.name_scope(name, "PiecewiseConstant",
+ [x, boundaries, values, name]) as name:
+ boundaries = ops.convert_n_to_tensor(boundaries)
+ values = ops.convert_n_to_tensor(values)
+ x_recomp = ops.convert_to_tensor(x)
+ # Avoid explicit conversion to x's dtype. This could result in faulty
+ # comparisons, for example if floats are converted to integers.
+ for i, b in enumerate(boundaries):
+ if b.dtype.base_dtype != x_recomp.dtype.base_dtype:
+ # We can promote int32 boundaries to int64 without loss of precision.
+ # This covers the most common case where the user passes in boundaries
+ # as an array of Python integers.
+ if (b.dtype.base_dtype == dtypes.int32 and
+ x_recomp.dtype.base_dtype == dtypes.int64):
+ b = math_ops.cast(b, x_recomp.dtype.base_dtype)
+ boundaries[i] = b
+ else:
+ raise ValueError(
+ "Boundaries (%s) must have the same dtype as x (%s)." %
+ (b.dtype.base_dtype, x_recomp.dtype.base_dtype))
+      # TODO(rdipietro): Ensure that boundaries' elements are strictly
+      # increasing.
+ for v in values[1:]:
+ if v.dtype.base_dtype != values[0].dtype.base_dtype:
+ raise ValueError(
+ "Values must have elements all with the same dtype (%s vs %s)." %
+ (values[0].dtype.base_dtype, v.dtype.base_dtype))
+ pred_fn_pairs = []
+ pred_fn_pairs.append((x_recomp <= boundaries[0], lambda: values[0]))
+ pred_fn_pairs.append((x_recomp > boundaries[-1], lambda: values[-1]))
+ for low, high, v in zip(boundaries[:-1], boundaries[1:], values[1:-1]):
+ # Need to bind v here; can do this with lambda v=v: ...
+ pred = (x_recomp > low) & (x_recomp <= high)
+ pred_fn_pairs.append((pred, lambda v=v: v))
+
+ # The default isn't needed here because our conditions are mutually
+ # exclusive and exhaustive, but tf.case requires it.
+ default = lambda: values[0]
+ return control_flow_ops.case(pred_fn_pairs, default, exclusive=True)
+
+ return functools.partial(decayed_lr, x, boundaries, values, name)
+
+
+@tf_export("train.polynomial_decay", v1=[])
+def polynomial_decay(learning_rate,
+ global_step,
+ decay_steps,
+ end_learning_rate=0.0001,
+ power=1.0,
+ cycle=False,
+ name=None):
+ """Applies a polynomial decay to the learning rate.
+
+ It is commonly observed that a monotonically decreasing learning rate, whose
+ degree of change is carefully chosen, results in a better performing model.
+ This function applies a polynomial decay function to a provided initial
+ `learning_rate` to reach an `end_learning_rate` in the given `decay_steps`.
+
+ It requires a `global_step` value to compute the decayed learning rate. You
+ can just pass a TensorFlow variable that you increment at each training step.
+
+ The function returns a no-arg callable that outputs the decayed learning
+ rate. This can be useful for changing the learning rate value across
+ different invocations of optimizer functions. It is computed as:
+
+ ```python
+ global_step = min(global_step, decay_steps)
+ decayed_learning_rate = (learning_rate - end_learning_rate) *
+ (1 - global_step / decay_steps) ^ (power) +
+ end_learning_rate
+
+ ```
+
+  If `cycle` is True then a multiple of `decay_steps` is used, the first one
+  that is bigger than `global_step`.
+
+  ```python
+  decay_steps = decay_steps * ceil(global_step / decay_steps)
+  decayed_learning_rate = (learning_rate - end_learning_rate) *
+                          (1 - global_step / decay_steps) ^ (power) +
+                          end_learning_rate
+
+  ```
+
+ Example: decay from 0.1 to 0.01 in 10000 steps using sqrt (i.e. power=0.5):
+
+ ```python
+ ...
+ global_step = tf.Variable(0, trainable=False)
+ starter_learning_rate = 0.1
+ end_learning_rate = 0.01
+ decay_steps = 10000
+ learning_rate_fn = tf.train.polynomial_decay(starter_learning_rate,
+ global_step, decay_steps,
+ end_learning_rate,
+ power=0.5)
+ # Passing global_step to minimize() will increment it at each step.
+ learning_step = (
+ tf.train.GradientDescentOptimizer(learning_rate_fn)
+ .minimize(...my loss..., global_step=global_step)
+ )
+ ```
+
+ Args:
+ learning_rate: A scalar `float32` or `float64` `Tensor` or a
+ Python number. The initial learning rate.
+ global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
+ Global step to use for the decay computation. Must not be negative.
+ decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
+ Must be positive. See the decay computation above.
+ end_learning_rate: A scalar `float32` or `float64` `Tensor` or a
+ Python number. The minimal end learning rate.
+ power: A scalar `float32` or `float64` `Tensor` or a
+ Python number. The power of the polynomial. Defaults to linear, 1.0.
+ cycle: A boolean, whether or not it should cycle beyond decay_steps.
+ name: String. Optional name of the operation. Defaults to
+ 'PolynomialDecay'.
+
+ Returns:
+ A no-arg function that outputs the decayed learning rate, a scalar `Tensor`
+ of the same type as `learning_rate`.
+
+ Raises:
+ ValueError: if `global_step` is not supplied.
+ """
+ if global_step is None:
+ raise ValueError("global_step is required for polynomial_decay.")
+ def decayed_lr(learning_rate, global_step, decay_steps, end_learning_rate,
+ power, cycle, name):
+ """Helper to recompute learning rate; most helpful in eager-mode."""
+ with ops.name_scope(
+ name, "PolynomialDecay",
+ [learning_rate, global_step, decay_steps, end_learning_rate, power]
+ ) as name:
+ learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
+ dtype = learning_rate.dtype
+ end_learning_rate = math_ops.cast(end_learning_rate, dtype)
+ power = math_ops.cast(power, dtype)
+
+ global_step_recomp = math_ops.cast(global_step, dtype)
+ decay_steps_recomp = math_ops.cast(decay_steps, dtype)
+ if cycle:
+ # Find the first multiple of decay_steps that is bigger than
+ # global_step. If global_step is zero set the multiplier to 1
+ multiplier = control_flow_ops.cond(
+ math_ops.equal(global_step_recomp, 0), lambda: 1.0,
+ lambda: math_ops.ceil(global_step_recomp / decay_steps))
+ decay_steps_recomp = math_ops.multiply(decay_steps_recomp, multiplier)
+ else:
+ # Make sure that the global_step used is not bigger than decay_steps.
+ global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)
+
+ p = math_ops.div(global_step_recomp, decay_steps_recomp)
+ return math_ops.add(
+ math_ops.multiply(learning_rate - end_learning_rate,
+ math_ops.pow(1 - p, power)),
+ end_learning_rate,
+ name=name)
+
+ return functools.partial(
+ decayed_lr, learning_rate, global_step, decay_steps, end_learning_rate,
+ power, cycle, name)
+
+
+@tf_export("train.natural_exp_decay", v1=[])
+def natural_exp_decay(learning_rate,
+ global_step,
+ decay_steps,
+ decay_rate,
+ staircase=False,
+ name=None):
+ """Applies natural exponential decay to the initial learning rate.
+
+ When training a model, it is often recommended to lower the learning rate as
+ the training progresses. This function applies an exponential decay function
+  to a provided initial learning rate. It requires a `global_step` value to
+ compute the decayed learning rate. You can just pass a TensorFlow variable
+ that you increment at each training step.
+
+ The function returns a no-arg callable that produces the decayed learning
+ rate. This can be useful for changing the learning rate value across
+ different invocations of optimizer functions. It is computed as:
+
+ ```python
+ decayed_learning_rate = learning_rate * exp(-decay_rate * global_step /
+  decay_steps)
+ ```
+
+ or, if `staircase` is `True`, as:
+
+ ```python
+ decayed_learning_rate = learning_rate * exp(-decay_rate * floor(global_step /
+  decay_steps))
+ ```
+
+  Example: decay exponentially with a decay rate of 0.5:
+
+ ```python
+ ...
+ global_step = tf.Variable(0, trainable=False)
+ learning_rate = 0.1
+ decay_steps = 5
+ k = 0.5
+ learning_rate_fn = tf.train.natural_exp_decay(learning_rate, global_step,
+ decay_steps, k)
+
+ # Passing global_step to minimize() will increment it at each step.
+ learning_step = (
+ tf.train.GradientDescentOptimizer(learning_rate_fn)
+ .minimize(...my loss..., global_step=global_step)
+ )
+ ```
+
+ Args:
+ learning_rate: A scalar `float32` or `float64` `Tensor` or a
+ Python number. The initial learning rate.
+ global_step: A Python number.
+ Global step to use for the decay computation. Must not be negative.
+ decay_steps: How often to apply decay.
+ decay_rate: A Python number. The decay rate.
+ staircase: Whether to apply decay in a discrete staircase, as opposed to
+ continuous, fashion.
+ name: String. Optional name of the operation. Defaults to
+      'NaturalExpDecay'.
+
+ Returns:
+ A no-arg function that outputs the decayed learning rate, a scalar `Tensor`
+ of the same type as `learning_rate`.
+
+ Raises:
+ ValueError: if `global_step` is not supplied.
+ """
+ if global_step is None:
+ raise ValueError("global_step is required for natural_exp_decay.")
+ def decayed_lr(learning_rate, global_step, decay_steps, decay_rate, staircase,
+ name):
+ """Helper to recompute learning rate; most helpful in eager-mode."""
+ with ops.name_scope(name, "NaturalExpDecay",
+ [learning_rate, global_step, decay_rate]) as name:
+ learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
+ dtype = learning_rate.dtype
+ decay_steps = math_ops.cast(decay_steps, dtype)
+ decay_rate = math_ops.cast(decay_rate, dtype)
+
+ global_step_recomp = math_ops.cast(global_step, dtype)
+ p = global_step_recomp / decay_steps
+ if staircase:
+ p = math_ops.floor(p)
+ exponent = math_ops.exp(
+ math_ops.multiply(math_ops.negative(decay_rate), p))
+ return math_ops.multiply(learning_rate, exponent, name=name)
+
+ return functools.partial(decayed_lr, learning_rate, global_step, decay_steps,
+ decay_rate, staircase, name)
+
+
+@tf_export("train.inverse_time_decay", v1=[])
+def inverse_time_decay(learning_rate,
+ global_step,
+ decay_steps,
+ decay_rate,
+ staircase=False,
+ name=None):
+ """Applies inverse time decay to the initial learning rate.
+
+ When training a model, it is often recommended to lower the learning rate as
+ the training progresses. This function applies an inverse decay function
+  to a provided initial learning rate. It requires a `global_step` value to
+ compute the decayed learning rate. You can just pass a TensorFlow variable
+ that you increment at each training step.
+
+ The function returns a no-arg callable that produces the decayed learning
+ rate. This can be useful for changing the learning rate value across
+ different invocations of optimizer functions. It is computed as:
+
+ ```python
+ decayed_learning_rate = learning_rate / (1 + decay_rate * global_step /
+  decay_steps)
+ ```
+
+ or, if `staircase` is `True`, as:
+
+ ```python
+ decayed_learning_rate = learning_rate / (1 + decay_rate * floor(global_step /
+  decay_steps))
+ ```
+
+ Example: decay 1/t with a rate of 0.5:
+
+ ```python
+ ...
+ global_step = tf.Variable(0, trainable=False)
+ learning_rate = 0.1
+ decay_steps = 1.0
+ decay_rate = 0.5
+ learning_rate_fn = tf.train.inverse_time_decay(learning_rate, global_step,
+ decay_steps, decay_rate)
+
+ # Passing global_step to minimize() will increment it at each step.
+ learning_step = (
+ tf.train.GradientDescentOptimizer(learning_rate_fn)
+ .minimize(...my loss..., global_step=global_step)
+ )
+ ```
+
+ Args:
+ learning_rate: A scalar `float32` or `float64` `Tensor` or a
+ Python number. The initial learning rate.
+ global_step: A Python number.
+ Global step to use for the decay computation. Must not be negative.
+ decay_steps: How often to apply decay.
+ decay_rate: A Python number. The decay rate.
+ staircase: Whether to apply decay in a discrete staircase, as opposed to
+ continuous, fashion.
+ name: String. Optional name of the operation. Defaults to
+ 'InverseTimeDecay'.
+
+ Returns:
+ A no-arg function that outputs the decayed learning rate, a scalar `Tensor`
+ of the same type as `learning_rate`.
+
+ Raises:
+ ValueError: if `global_step` is not supplied.
+ """
+ if global_step is None:
+ raise ValueError("global_step is required for inverse_time_decay.")
+ def decayed_lr(learning_rate, global_step, decay_steps, decay_rate, staircase,
+ name):
+ """Helper to recompute learning rate; most helpful in eager-mode."""
+ with ops.name_scope(name, "InverseTimeDecay",
+ [learning_rate, global_step, decay_rate]) as name:
+ learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
+ dtype = learning_rate.dtype
+ decay_steps = math_ops.cast(decay_steps, dtype)
+ decay_rate = math_ops.cast(decay_rate, dtype)
+
+ global_step_recomp = math_ops.cast(global_step, dtype)
+ p = global_step_recomp / decay_steps
+ if staircase:
+ p = math_ops.floor(p)
+ const = math_ops.cast(constant_op.constant(1), dtype)
+ denom = math_ops.add(const, math_ops.multiply(decay_rate, p))
+ return math_ops.div(learning_rate, denom, name=name)
+
+ return functools.partial(decayed_lr, learning_rate, global_step, decay_steps,
+ decay_rate, staircase, name)
+
+
+@tf_export("train.cosine_decay", v1=[])
+def cosine_decay(learning_rate, global_step, decay_steps, alpha=0.0,
+ name=None):
+ """Applies cosine decay to the learning rate.
+
+ See [Loshchilov & Hutter, ICLR2016], SGDR: Stochastic Gradient Descent
+ with Warm Restarts. https://arxiv.org/abs/1608.03983
+
+ When training a model, it is often recommended to lower the learning rate as
+ the training progresses. This function applies a cosine decay function
+ to a provided initial learning rate. It requires a `global_step` value to
+ compute the decayed learning rate. You can just pass a TensorFlow variable
+ that you increment at each training step.
+
+ The function returns a no-arg callable that produces the decayed learning
+ rate. This can be useful for changing the learning rate value across
+ different invocations of optimizer functions. It is computed as:
+
+ ```python
+ global_step = min(global_step, decay_steps)
+ cosine_decay = 0.5 * (1 + cos(pi * global_step / decay_steps))
+ decayed = (1 - alpha) * cosine_decay + alpha
+ decayed_learning_rate = learning_rate * decayed
+ ```
+
+ Example usage:
+ ```python
+ decay_steps = 1000
+ lr_decayed_fn = tf.train.cosine_decay(learning_rate, global_step, decay_steps)
+ ```
+
+ Args:
+ learning_rate: A scalar `float32` or `float64` Tensor or a Python number.
+ The initial learning rate.
+ global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
+ Global step to use for the decay computation.
+ decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
+ Number of steps to decay over.
+ alpha: A scalar `float32` or `float64` Tensor or a Python number.
+ Minimum learning rate value as a fraction of learning_rate.
+ name: String. Optional name of the operation. Defaults to 'CosineDecay'.
+ Returns:
+ A no-arg function that outputs the decayed learning rate, a scalar `Tensor`
+ of the same type as `learning_rate`.
+ Raises:
+ ValueError: if `global_step` is not supplied.
+ """
+ if global_step is None:
+ raise ValueError("cosine decay requires global_step")
+ def decayed_lr(learning_rate, global_step, decay_steps, alpha, name):
+ """Helper to recompute learning rate; most helpful in eager-mode."""
+ with ops.name_scope(name, "CosineDecay",
+ [learning_rate, global_step]) as name:
+ learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
+ dtype = learning_rate.dtype
+ decay_steps = math_ops.cast(decay_steps, dtype)
+
+ global_step_recomp = math_ops.cast(global_step, dtype)
+ global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)
+ completed_fraction = global_step_recomp / decay_steps
+ cosine_decayed = 0.5 * (1.0 + math_ops.cos(
+ constant_op.constant(math.pi) * completed_fraction))
+
+ decayed = (1 - alpha) * cosine_decayed + alpha
+ return math_ops.multiply(learning_rate, decayed)
+
+ return functools.partial(decayed_lr, learning_rate, global_step, decay_steps,
+ alpha, name)
+
+
+@tf_export("train.cosine_decay_restarts", v1=[])
+def cosine_decay_restarts(learning_rate,
+ global_step,
+ first_decay_steps,
+ t_mul=2.0,
+ m_mul=1.0,
+ alpha=0.0,
+ name=None):
+ """Applies cosine decay with restarts to the learning rate.
+
+ See [Loshchilov & Hutter, ICLR2016], SGDR: Stochastic Gradient Descent
+ with Warm Restarts. https://arxiv.org/abs/1608.03983
+
+ When training a model, it is often recommended to lower the learning rate as
+ the training progresses. This function applies a cosine decay function with
+ restarts to a provided initial learning rate. It requires a `global_step`
+ value to compute the decayed learning rate. You can just pass a TensorFlow
+ variable that you increment at each training step.
+
+ The function returns a no-arg callable that produces the decayed learning
+ rate while taking into account possible warm restarts. This can be useful for
+ changing the learning rate value across different invocations of optimizer
+ functions.
+
+ The learning rate multiplier first decays
+ from 1 to `alpha` for `first_decay_steps` steps. Then, a warm
+ restart is performed. Each new warm restart runs for `t_mul` times more steps
+ and with `m_mul` times smaller initial learning rate.
+
+ Example usage:
+ ```python
+ first_decay_steps = 1000
+ lr_decayed_fn = tf.train.cosine_decay_restarts(learning_rate, global_step,
+ first_decay_steps)
+ ```
+
+ Args:
+ learning_rate: A scalar `float32` or `float64` Tensor or a Python number.
+ The initial learning rate.
+ global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
+ Global step to use for the decay computation.
+ first_decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
+ Number of steps to decay over.
+    t_mul: A scalar `float32` or `float64` `Tensor` or a Python number.
+      Used to derive the number of iterations in the i-th period.
+    m_mul: A scalar `float32` or `float64` `Tensor` or a Python number.
+      Used to derive the initial learning rate of the i-th period.
+ alpha: A scalar `float32` or `float64` Tensor or a Python number.
+ Minimum learning rate value as a fraction of the learning_rate.
+ name: String. Optional name of the operation. Defaults to 'SGDRDecay'.
+ Returns:
+ A no-arg function that outputs the decayed learning rate, a scalar `Tensor`
+ of the same type as `learning_rate`.
+
+ Raises:
+ ValueError: if `global_step` is not supplied.
+ """
+ if global_step is None:
+ raise ValueError("cosine decay restarts requires global_step")
+ def decayed_lr(learning_rate, global_step, first_decay_steps, t_mul, m_mul,
+ alpha, name):
+ """Helper to recompute learning rate; most helpful in eager-mode."""
+ with ops.name_scope(name, "SGDRDecay", [learning_rate, global_step]
+ ) as name:
+ learning_rate = ops.convert_to_tensor(
+ learning_rate, name="initial_learning_rate")
+ dtype = learning_rate.dtype
+ first_decay_steps = math_ops.cast(first_decay_steps, dtype)
+ alpha = math_ops.cast(alpha, dtype)
+ t_mul = math_ops.cast(t_mul, dtype)
+ m_mul = math_ops.cast(m_mul, dtype)
+
+ global_step_recomp = math_ops.cast(global_step, dtype)
+ completed_fraction = global_step_recomp / first_decay_steps
+
+ def compute_step(completed_fraction, geometric=False):
+ """Helper for `cond` operation."""
+ if geometric:
+ i_restart = math_ops.floor(
+ math_ops.log(1.0 - completed_fraction * (1.0 - t_mul)) /
+ math_ops.log(t_mul))
+
+ sum_r = (1.0 - t_mul**i_restart) / (1.0 - t_mul)
+ completed_fraction = (completed_fraction - sum_r) / t_mul**i_restart
+
+ else:
+ i_restart = math_ops.floor(completed_fraction)
+ completed_fraction -= i_restart
+
+ return i_restart, completed_fraction
+
+ i_restart, completed_fraction = control_flow_ops.cond(
+ math_ops.equal(t_mul, 1.0),
+ lambda: compute_step(completed_fraction, geometric=False),
+ lambda: compute_step(completed_fraction, geometric=True))
+
+ m_fac = m_mul**i_restart
+ cosine_decayed = 0.5 * m_fac * (1.0 + math_ops.cos(
+ constant_op.constant(math.pi) * completed_fraction))
+ decayed = (1 - alpha) * cosine_decayed + alpha
+
+ return math_ops.multiply(learning_rate, decayed, name=name)
+
+ return functools.partial(decayed_lr, learning_rate, global_step,
+ first_decay_steps, t_mul, m_mul, alpha, name)
+
+
+@tf_export("train.linear_cosine_decay", v1=[])
+def linear_cosine_decay(learning_rate,
+ global_step,
+ decay_steps,
+ num_periods=0.5,
+ alpha=0.0,
+ beta=0.001,
+ name=None):
+ """Applies linear cosine decay to the learning rate.
+
+ See [Bello et al., ICML2017] Neural Optimizer Search with RL.
+ https://arxiv.org/abs/1709.07417
+
+ For the idea of warm starts here controlled by `num_periods`,
+ see [Loshchilov & Hutter, ICLR2016] SGDR: Stochastic Gradient Descent
+ with Warm Restarts. https://arxiv.org/abs/1608.03983
+
+ Note that linear cosine decay is more aggressive than cosine decay and
+ larger initial learning rates can typically be used.
+
+ When training a model, it is often recommended to lower the learning rate as
+ the training progresses. This function applies a linear cosine decay function
+ to a provided initial learning rate. It requires a `global_step` value to
+ compute the decayed learning rate. You can just pass a TensorFlow variable
+ that you increment at each training step.
+
+ The function returns a no-arg callable that produces the decayed learning
+ rate. This can be useful for changing the learning rate value across
+ different invocations of optimizer functions. It is computed as:
+
+ ```python
+ global_step = min(global_step, decay_steps)
+  linear_decay = (decay_steps - global_step) / decay_steps
+ cosine_decay = 0.5 * (
+ 1 + cos(pi * 2 * num_periods * global_step / decay_steps))
+ decayed = (alpha + linear_decay) * cosine_decay + beta
+ decayed_learning_rate = learning_rate * decayed
+ ```
+
+ Example usage:
+ ```python
+ decay_steps = 1000
+ lr_decayed_fn = tf.train.linear_cosine_decay(learning_rate, global_step,
+ decay_steps)
+ ```
+
+ Args:
+ learning_rate: A scalar `float32` or `float64` Tensor or a Python number.
+ The initial learning rate.
+ global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
+ Global step to use for the decay computation.
+ decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
+ Number of steps to decay over.
+ num_periods: Number of periods in the cosine part of the decay.
+ See computation above.
+ alpha: See computation above.
+ beta: See computation above.
+ name: String. Optional name of the operation. Defaults to
+ 'LinearCosineDecay'.
+ Returns:
+ A no-arg function that outputs the decayed learning rate, a scalar `Tensor`
+ of the same type as `learning_rate`.
+ Raises:
+ ValueError: if `global_step` is not supplied.
+ """
+ if global_step is None:
+ raise ValueError("linear cosine decay requires global_step")
+ def decayed_lr(learning_rate, global_step, decay_steps, num_periods, alpha,
+ beta, name):
+ """Helper to recompute learning rate; most helpful in eager-mode."""
+ with ops.name_scope(name, "LinearCosineDecay",
+ [learning_rate, global_step]) as name:
+ learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
+ dtype = learning_rate.dtype
+ decay_steps = math_ops.cast(decay_steps, dtype)
+ num_periods = math_ops.cast(num_periods, dtype)
+ alpha = math_ops.cast(alpha, dtype)
+ beta = math_ops.cast(beta, dtype)
+
+ global_step_recomp = math_ops.cast(global_step, dtype)
+ global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)
+ linear_decayed = (decay_steps - global_step_recomp) / decay_steps
+ completed_fraction = global_step_recomp / decay_steps
+ fraction = 2.0 * num_periods * completed_fraction
+ cosine_decayed = 0.5 * (
+ 1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction))
+
+ linear_cosine_decayed = (alpha + linear_decayed) * cosine_decayed + beta
+ return math_ops.multiply(learning_rate, linear_cosine_decayed, name=name)
+
+ return functools.partial(decayed_lr, learning_rate, global_step, decay_steps,
+ num_periods, alpha, beta, name)
+
+
+@tf_export("train.noisy_linear_cosine_decay", v1=[])
+def noisy_linear_cosine_decay(learning_rate,
+ global_step,
+ decay_steps,
+ initial_variance=1.0,
+ variance_decay=0.55,
+ num_periods=0.5,
+ alpha=0.0,
+ beta=0.001,
+ name=None):
+ """Applies noisy linear cosine decay to the learning rate.
+
+ See [Bello et al., ICML2017] Neural Optimizer Search with RL.
+ https://arxiv.org/abs/1709.07417
+
+ For the idea of warm starts here controlled by `num_periods`,
+ see [Loshchilov & Hutter, ICLR2016] SGDR: Stochastic Gradient Descent
+ with Warm Restarts. https://arxiv.org/abs/1608.03983
+
+ Note that linear cosine decay is more aggressive than cosine decay and
+ larger initial learning rates can typically be used.
+
+ When training a model, it is often recommended to lower the learning rate as
+ the training progresses. This function applies a noisy linear
+ cosine decay function to a provided initial learning rate.
+ It requires a `global_step` value to compute the decayed learning rate.
+ You can just pass a TensorFlow variable that you increment at each
+ training step.
+
+ The function returns a no-arg callable that produces the decayed learning
+ rate. This can be useful for changing the learning rate value across
+ different invocations of optimizer functions. It is computed as:
+
+ ```python
+ global_step = min(global_step, decay_steps)
+  linear_decay = (decay_steps - global_step) / decay_steps
+ cosine_decay = 0.5 * (
+ 1 + cos(pi * 2 * num_periods * global_step / decay_steps))
+ decayed = (alpha + linear_decay + eps_t) * cosine_decay + beta
+ decayed_learning_rate = learning_rate * decayed
+ ```
+  where eps_t is 0-centered Gaussian noise with variance
+ initial_variance / (1 + global_step) ** variance_decay
+
+ Example usage:
+ ```python
+ decay_steps = 1000
+ lr_decayed_fn = tf.train.noisy_linear_cosine_decay(learning_rate, global_step,
+ decay_steps)
+ ```
+
+ Args:
+ learning_rate: A scalar `float32` or `float64` Tensor or a Python number.
+ The initial learning rate.
+ global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
+ Global step to use for the decay computation.
+ decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
+ Number of steps to decay over.
+ initial_variance: initial variance for the noise. See computation above.
+ variance_decay: decay for the noise's variance. See computation above.
+ num_periods: Number of periods in the cosine part of the decay.
+ See computation above.
+ alpha: See computation above.
+ beta: See computation above.
+ name: String. Optional name of the operation. Defaults to
+ 'NoisyLinearCosineDecay'.
+ Returns:
+ A no-arg function that outputs the decayed learning rate, a scalar `Tensor`
+ of the same type as `learning_rate`.
+ Raises:
+ ValueError: if `global_step` is not supplied.
+ """
+ if global_step is None:
+ raise ValueError("noisy linear cosine decay requires global_step")
+ def decayed_lr(learning_rate, global_step, decay_steps, initial_variance,
+ variance_decay, num_periods, alpha, beta, name):
+ """Helper to recompute learning rate; most helpful in eager-mode."""
+ with ops.name_scope(name, "NoisyLinearCosineDecay",
+ [learning_rate, global_step]) as name:
+ learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
+ dtype = learning_rate.dtype
+ decay_steps = math_ops.cast(decay_steps, dtype)
+ initial_variance = math_ops.cast(initial_variance, dtype)
+ variance_decay = math_ops.cast(variance_decay, dtype)
+ num_periods = math_ops.cast(num_periods, dtype)
+ alpha = math_ops.cast(alpha, dtype)
+ beta = math_ops.cast(beta, dtype)
+
+ global_step_recomp = math_ops.cast(global_step, dtype)
+ global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)
+ linear_decayed = (decay_steps - global_step_recomp) / decay_steps
+ variance = initial_variance / (
+ math_ops.pow(1.0 + global_step_recomp, variance_decay))
+ std = math_ops.sqrt(variance)
+ noisy_linear_decayed = (
+ linear_decayed + random_ops.random_normal(
+ linear_decayed.shape, stddev=std))
+
+ completed_fraction = global_step_recomp / decay_steps
+ fraction = 2.0 * num_periods * completed_fraction
+ cosine_decayed = 0.5 * (
+ 1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction))
+ noisy_linear_cosine_decayed = (
+ (alpha + noisy_linear_decayed) * cosine_decayed + beta)
+
+ return math_ops.multiply(
+ learning_rate, noisy_linear_cosine_decayed, name=name)
+
+ return functools.partial(decayed_lr, learning_rate, global_step, decay_steps,
+ initial_variance, variance_decay, num_periods, alpha,
+ beta, name)
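
Every function in the new module follows the pattern visible above: validate `global_step`, define a `decayed_lr` helper, and return it via `functools.partial` as a no-arg callable. A minimal usage sketch (illustrative, not part of the patch), modeled on the docstring examples in this file:

```python
# Illustrative usage of the new v2 API, using the import path from the patch.
import tensorflow as tf
from tensorflow.python.training import learning_rate_decay_v2

global_step = tf.Variable(0, trainable=False)
lr_fn = learning_rate_decay_v2.piecewise_constant(
    global_step, boundaries=[100000, 110000], values=[1.0, 0.5, 0.1])

# The rate is only materialized when the callable is invoked, so each call
# reflects the current value of `global_step`.
learning_rate = lr_fn()

# As in the docstring examples, the callable itself can also be handed to an
# optimizer that accepts a learning-rate function.
optimizer = tf.train.GradientDescentOptimizer(lr_fn)
```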
diff --git a/tensorflow/python/training/learning_rate_decay_v2_test.py b/tensorflow/python/training/learning_rate_decay_v2_test.py
new file mode 100644
index 0000000000..0f2d60dafc
--- /dev/null
+++ b/tensorflow/python/training/learning_rate_decay_v2_test.py
@@ -0,0 +1,497 @@
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+"""Functional test for learning rate decay."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import math
+
+from tensorflow.python.eager import context
+from tensorflow.python.framework import test_util
+# Import resource_variable_ops for the variables-to-tensor implicit conversion.
+from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
+from tensorflow.python.ops import variables
+from tensorflow.python.platform import googletest
+from tensorflow.python.training import learning_rate_decay_v2
+
+
+class LRDecayTestV2(test_util.TensorFlowTestCase):
+
+ @test_util.run_in_graph_and_eager_modes
+ def testContinuous(self):
+ self.evaluate(variables.global_variables_initializer())
+ step = 5
+ decayed_lr = learning_rate_decay_v2.exponential_decay(0.05, step, 10, 0.96)
+ expected = .05 * 0.96**(5.0 / 10.0)
+ self.assertAllClose(self.evaluate(decayed_lr()), expected, 1e-6)
+
+ @test_util.run_in_graph_and_eager_modes
+ def testStaircase(self):
+ if context.executing_eagerly():
+ step = resource_variable_ops.ResourceVariable(0)
+ self.evaluate(variables.global_variables_initializer())
+ decayed_lr = learning_rate_decay_v2.exponential_decay(
+ .1, step, 3, 0.96, staircase=True)
+
+ # No change to learning rate due to staircase
+ expected = .1
+ self.evaluate(step.assign(1))
+ self.assertAllClose(self.evaluate(decayed_lr()), expected, 1e-6)
+
+ expected = .1
+ self.evaluate(step.assign(2))
+ self.assertAllClose(self.evaluate(decayed_lr()), expected, 1e-6)
+
+ # Decayed learning rate
+ expected = .1 * 0.96 ** (100 // 3)
+ self.evaluate(step.assign(100))
+ self.assertAllClose(self.evaluate(decayed_lr()), expected, 1e-6)
+
+ def testVariables(self):
+ with self.test_session():
+ step = variables.Variable(1)
+ assign_1 = step.assign(1)
+ assign_2 = step.assign(2)
+ assign_100 = step.assign(100)
+ decayed_lr = learning_rate_decay_v2.exponential_decay(.1, step, 3, 0.96,
+ staircase=True)
+ variables.global_variables_initializer().run()
+ # No change to learning rate
+ assign_1.op.run()
+ self.assertAllClose(decayed_lr().eval(), .1, 1e-6)
+ assign_2.op.run()
+ self.assertAllClose(decayed_lr().eval(), .1, 1e-6)
+ # Decayed learning rate
+ assign_100.op.run()
+ expected = .1 * 0.96 ** (100 // 3)
+ self.assertAllClose(decayed_lr().eval(), expected, 1e-6)
+
+ @test_util.run_in_graph_and_eager_modes
+ def testPiecewiseConstant(self):
+ x = resource_variable_ops.ResourceVariable(-999)
+ decayed_lr = learning_rate_decay_v2.piecewise_constant(
+ x, [100, 110, 120], [1.0, 0.1, 0.01, 0.001])
+
+ self.evaluate(variables.global_variables_initializer())
+
+ self.assertAllClose(self.evaluate(decayed_lr()), 1.0, 1e-6)
+ self.evaluate(x.assign(100))
+ self.assertAllClose(self.evaluate(decayed_lr()), 1.0, 1e-6)
+ self.evaluate(x.assign(105))
+ self.assertAllClose(self.evaluate(decayed_lr()), 0.1, 1e-6)
+ self.evaluate(x.assign(110))
+ self.assertAllClose(self.evaluate(decayed_lr()), 0.1, 1e-6)
+ self.evaluate(x.assign(120))
+ self.assertAllClose(self.evaluate(decayed_lr()), 0.01, 1e-6)
+ self.evaluate(x.assign(999))
+ self.assertAllClose(self.evaluate(decayed_lr()), 0.001, 1e-6)
+
+ @test_util.run_in_graph_and_eager_modes
+ def testPiecewiseConstantEdgeCases(self):
+ x_int = resource_variable_ops.ResourceVariable(
+ 0, dtype=variables.dtypes.int32)
+ boundaries, values = [-1.0, 1.0], [1, 2, 3]
+ with self.assertRaises(ValueError):
+ decayed_lr = learning_rate_decay_v2.piecewise_constant(
+ x_int, boundaries, values)
+ decayed_lr()
+
+ x = resource_variable_ops.ResourceVariable(0.0)
+ boundaries, values = [-1.0, 1.0], [1.0, 2, 3]
+ with self.assertRaises(ValueError):
+ decayed_lr = learning_rate_decay_v2.piecewise_constant(
+ x, boundaries, values)()
+ decayed_lr()
+
+ # Test that ref types are valid.
+ if not context.executing_eagerly():
+ x = variables.Variable(0.0)
+ x_ref = x.op.outputs[0] # float32_ref tensor should be accepted
+ boundaries, values = [1.0, 2.0], [1, 2, 3]
+ learning_rate_decay_v2.piecewise_constant(x_ref, boundaries, values)
+
+ # Test casting boundaries from int32 to int64.
+ x_int64 = resource_variable_ops.ResourceVariable(
+ 0, dtype=variables.dtypes.int64)
+ boundaries, values = [1, 2, 3], [0.4, 0.5, 0.6, 0.7]
+ decayed_lr = learning_rate_decay_v2.piecewise_constant(
+ x_int64, boundaries, values)
+
+ self.evaluate(variables.global_variables_initializer())
+ self.assertAllClose(self.evaluate(decayed_lr()), 0.4, 1e-6)
+ self.evaluate(x_int64.assign(1))
+ self.assertAllClose(self.evaluate(decayed_lr()), 0.4, 1e-6)
+ self.evaluate(x_int64.assign(2))
+ self.assertAllClose(self.evaluate(decayed_lr()), 0.5, 1e-6)
+ self.evaluate(x_int64.assign(3))
+ self.assertAllClose(self.evaluate(decayed_lr()), 0.6, 1e-6)
+ self.evaluate(x_int64.assign(4))
+ self.assertAllClose(self.evaluate(decayed_lr()), 0.7, 1e-6)
+
+
+class LinearDecayTestV2(test_util.TensorFlowTestCase):
+
+ @test_util.run_in_graph_and_eager_modes
+ def testHalfWay(self):
+ step = 5
+ lr = 0.05
+ end_lr = 0.0
+ decayed_lr = learning_rate_decay_v2.polynomial_decay(lr, step, 10, end_lr)
+ expected = lr * 0.5
+ self.assertAllClose(self.evaluate(decayed_lr()), expected, 1e-6)
+
+ @test_util.run_in_graph_and_eager_modes
+ def testEnd(self):
+ step = 10
+ lr = 0.05
+ end_lr = 0.001
+ decayed_lr = learning_rate_decay_v2.polynomial_decay(lr, step, 10, end_lr)
+ expected = end_lr
+ self.assertAllClose(self.evaluate(decayed_lr()), expected, 1e-6)
+
+ @test_util.run_in_graph_and_eager_modes
+ def testHalfWayWithEnd(self):
+ step = 5
+ lr = 0.05
+ end_lr = 0.001
+ decayed_lr = learning_rate_decay_v2.polynomial_decay(lr, step, 10, end_lr)
+ expected = (lr + end_lr) * 0.5
+ self.assertAllClose(self.evaluate(decayed_lr()), expected, 1e-6)
+
+ @test_util.run_in_graph_and_eager_modes
+ def testBeyondEnd(self):
+ step = 15
+ lr = 0.05
+ end_lr = 0.001
+ decayed_lr = learning_rate_decay_v2.polynomial_decay(lr, step, 10, end_lr)
+ expected = end_lr
+ self.assertAllClose(self.evaluate(decayed_lr()), expected, 1e-6)
+
+ @test_util.run_in_graph_and_eager_modes
+ def testBeyondEndWithCycle(self):
+ step = 15
+ lr = 0.05
+ end_lr = 0.001
+ decayed_lr = learning_rate_decay_v2.polynomial_decay(
+ lr, step, 10, end_lr, cycle=True)
+ expected = (lr - end_lr) * 0.25 + end_lr
+ self.assertAllClose(self.evaluate(decayed_lr()), expected, 1e-6)
+
+
+class SqrtDecayTestV2(test_util.TensorFlowTestCase):
+
+ @test_util.run_in_graph_and_eager_modes
+ def testHalfWay(self):
+ step = 5
+ lr = 0.05
+ end_lr = 0.0
+ power = 0.5
+ decayed_lr = learning_rate_decay_v2.polynomial_decay(
+ lr, step, 10, end_lr, power=power)
+ expected = lr * 0.5**power
+ self.assertAllClose(self.evaluate(decayed_lr()), expected, 1e-6)
+
+ @test_util.run_in_graph_and_eager_modes
+ def testEnd(self):
+ step = 10
+ lr = 0.05
+ end_lr = 0.001
+ power = 0.5
+ decayed_lr = learning_rate_decay_v2.polynomial_decay(
+ lr, step, 10, end_lr, power=power)
+ expected = end_lr
+ self.assertAllClose(self.evaluate(decayed_lr()), expected, 1e-6)
+
+ @test_util.run_in_graph_and_eager_modes
+ def testHalfWayWithEnd(self):
+ step = 5
+ lr = 0.05
+ end_lr = 0.001
+ power = 0.5
+ decayed_lr = learning_rate_decay_v2.polynomial_decay(
+ lr, step, 10, end_lr, power=power)
+ expected = (lr - end_lr) * 0.5**power + end_lr
+ self.assertAllClose(self.evaluate(decayed_lr()), expected, 1e-6)
+
+ @test_util.run_in_graph_and_eager_modes
+ def testBeyondEnd(self):
+ step = 15
+ lr = 0.05
+ end_lr = 0.001
+ power = 0.5
+ decayed_lr = learning_rate_decay_v2.polynomial_decay(
+ lr, step, 10, end_lr, power=power)
+ expected = end_lr
+ self.assertAllClose(self.evaluate(decayed_lr()), expected, 1e-6)
+
+ @test_util.run_in_graph_and_eager_modes
+ def testBeyondEndWithCycle(self):
+ step = 15
+ lr = 0.05
+ end_lr = 0.001
+ power = 0.5
+ decayed_lr = learning_rate_decay_v2.polynomial_decay(
+ lr, step, 10, end_lr, power=power, cycle=True)
+ expected = (lr - end_lr) * 0.25**power + end_lr
+ self.assertAllClose(self.evaluate(decayed_lr()), expected, 1e-6)
+
+
+class PolynomialDecayTestV2(test_util.TensorFlowTestCase):
+
+ @test_util.run_in_graph_and_eager_modes
+ def testBeginWithCycle(self):
+ lr = 0.001
+ decay_steps = 10
+ step = 0
+ decayed_lr = learning_rate_decay_v2.polynomial_decay(
+ lr, step, decay_steps, cycle=True)
+ expected = lr
+ self.assertAllClose(self.evaluate(decayed_lr()), expected, 1e-6)
+
+
+class ExponentialDecayTestV2(test_util.TensorFlowTestCase):
+
+ @test_util.run_in_graph_and_eager_modes
+ def testDecay(self):
+ initial_lr = 0.1
+ k = 10
+ decay_rate = 0.96
+ step = resource_variable_ops.ResourceVariable(0)
+ decayed_lr = learning_rate_decay_v2.natural_exp_decay(initial_lr, step, k,
+ decay_rate)
+
+ self.evaluate(variables.global_variables_initializer())
+ for i in range(k + 1):
+ expected = initial_lr * math.exp(-i / k * decay_rate)
+ self.assertAllClose(self.evaluate(decayed_lr()), expected, 1e-6)
+ self.evaluate(step.assign_add(1))
+
+ @test_util.run_in_graph_and_eager_modes
+ def testStaircase(self):
+ initial_lr = 0.1
+ k = 10
+ decay_rate = 0.96
+ step = resource_variable_ops.ResourceVariable(0)
+ decayed_lr = learning_rate_decay_v2.natural_exp_decay(
+ initial_lr, step, k, decay_rate, staircase=True)
+
+ self.evaluate(variables.global_variables_initializer())
+ for i in range(k + 1):
+ expected = initial_lr * math.exp(-decay_rate * (i // k))
+ self.assertAllClose(self.evaluate(decayed_lr()), expected, 1e-6)
+ self.evaluate(step.assign_add(1))
+
+
+class InverseDecayTestV2(test_util.TensorFlowTestCase):
+
+ @test_util.run_in_graph_and_eager_modes
+ def testDecay(self):
+ initial_lr = 0.1
+ k = 10
+ decay_rate = 0.96
+ step = resource_variable_ops.ResourceVariable(0)
+ decayed_lr = learning_rate_decay_v2.inverse_time_decay(initial_lr, step, k,
+ decay_rate)
+
+ self.evaluate(variables.global_variables_initializer())
+ for i in range(k + 1):
+ expected = initial_lr / (1 + i / k * decay_rate)
+ self.assertAllClose(self.evaluate(decayed_lr()), expected, 1e-6)
+ self.evaluate(step.assign_add(1))
+
+ @test_util.run_in_graph_and_eager_modes
+ def testStaircase(self):
+ initial_lr = 0.1
+ k = 10
+ decay_rate = 0.96
+ step = resource_variable_ops.ResourceVariable(0)
+ decayed_lr = learning_rate_decay_v2.inverse_time_decay(
+ initial_lr, step, k, decay_rate, staircase=True)
+
+ self.evaluate(variables.global_variables_initializer())
+ for i in range(k + 1):
+ expected = initial_lr / (1 + decay_rate * (i // k))
+ self.assertAllClose(self.evaluate(decayed_lr()), expected, 1e-6)
+ self.evaluate(step.assign_add(1))
+
+
+class CosineDecayTestV2(test_util.TensorFlowTestCase):
+
+ def np_cosine_decay(self, step, decay_steps, alpha=0.0):
+ step = min(step, decay_steps)
+ completed_fraction = step / decay_steps
+ decay = 0.5 * (1.0 + math.cos(math.pi * completed_fraction))
+ return (1.0 - alpha) * decay + alpha
+
+ @test_util.run_in_graph_and_eager_modes
+ def testDecay(self):
+ num_training_steps = 1000
+ initial_lr = 1.0
+ for step in range(0, 1500, 250):
+ decayed_lr = learning_rate_decay_v2.cosine_decay(initial_lr, step,
+ num_training_steps)
+ expected = self.np_cosine_decay(step, num_training_steps)
+ self.assertAllClose(self.evaluate(decayed_lr()), expected, 1e-6)
+
+ @test_util.run_in_graph_and_eager_modes
+ def testAlpha(self):
+ num_training_steps = 1000
+ initial_lr = 1.0
+ alpha = 0.1
+ for step in range(0, 1500, 250):
+ decayed_lr = learning_rate_decay_v2.cosine_decay(initial_lr, step,
+ num_training_steps,
+ alpha)
+ expected = self.np_cosine_decay(step, num_training_steps, alpha)
+ self.assertAllClose(self.evaluate(decayed_lr()), expected, 1e-6)
+
+
+class CosineDecayRestartsTestV2(test_util.TensorFlowTestCase):
+
+ def np_cosine_decay_restarts(self, step, decay_steps, t_mul=2.0, m_mul=1.0,
+ alpha=0.0):
+ fac = 1.0
+ while step >= decay_steps:
+ step -= decay_steps
+ decay_steps *= t_mul
+ fac *= m_mul
+
+ completed_fraction = step / decay_steps
+ decay = fac * 0.5 * (1.0 + math.cos(math.pi * completed_fraction))
+ return (1.0 - alpha) * decay + alpha
+
+ @test_util.run_in_graph_and_eager_modes
+ def testDecay(self):
+ num_training_steps = 1000
+ initial_lr = 1.0
+ for step in range(0, 1500, 250):
+ decayed_lr = learning_rate_decay_v2.cosine_decay_restarts(
+ initial_lr, step, num_training_steps)
+ expected = self.np_cosine_decay_restarts(step, num_training_steps)
+ self.assertAllClose(self.evaluate(decayed_lr()), expected, 1e-6)
+
+ @test_util.run_in_graph_and_eager_modes
+ def testAlpha(self):
+ num_training_steps = 1000
+ initial_lr = 1.0
+ alpha = 0.1
+ for step in range(0, 1500, 250):
+ decayed_lr = learning_rate_decay_v2.cosine_decay_restarts(
+ initial_lr, step, num_training_steps, alpha=alpha)
+ expected = self.np_cosine_decay_restarts(
+ step, num_training_steps, alpha=alpha)
+ self.assertAllClose(self.evaluate(decayed_lr()), expected, 1e-6)
+
+ @test_util.run_in_graph_and_eager_modes
+ def testMMul(self):
+ num_training_steps = 1000
+ initial_lr = 1.0
+ m_mul = 0.9
+ for step in range(0, 1500, 250):
+ decayed_lr = learning_rate_decay_v2.cosine_decay_restarts(
+ initial_lr, step, num_training_steps, m_mul=m_mul)
+ expected = self.np_cosine_decay_restarts(
+ step, num_training_steps, m_mul=m_mul)
+ self.assertAllClose(self.evaluate(decayed_lr()), expected, 1e-6)
+
+ @test_util.run_in_graph_and_eager_modes
+ def testTMul(self):
+ num_training_steps = 1000
+ initial_lr = 1.0
+ t_mul = 1.0
+ for step in range(0, 1500, 250):
+ decayed_lr = learning_rate_decay_v2.cosine_decay_restarts(
+ initial_lr, step, num_training_steps, t_mul=t_mul)
+ expected = self.np_cosine_decay_restarts(
+ step, num_training_steps, t_mul=t_mul)
+ self.assertAllClose(self.evaluate(decayed_lr()), expected, 1e-6)
+
+
+class LinearCosineDecayTestV2(test_util.TensorFlowTestCase):
+
+ def np_linear_cosine_decay(self,
+ step,
+ decay_steps,
+ alpha=0.0,
+ beta=0.001,
+ num_periods=0.5):
+ step = min(step, decay_steps)
+ linear_decayed = float(decay_steps - step) / decay_steps
+ fraction = 2.0 * num_periods * step / float(decay_steps)
+ cosine_decayed = 0.5 * (1.0 + math.cos(math.pi * fraction))
+ return (alpha + linear_decayed) * cosine_decayed + beta
+
+ @test_util.run_in_graph_and_eager_modes
+ def testDefaultDecay(self):
+ num_training_steps = 1000
+ initial_lr = 1.0
+ for step in range(0, 1500, 250):
+ decayed_lr = learning_rate_decay_v2.linear_cosine_decay(
+ initial_lr, step, num_training_steps)
+ expected = self.np_linear_cosine_decay(step, num_training_steps)
+ self.assertAllClose(self.evaluate(decayed_lr()), expected, 1e-6)
+
+ @test_util.run_in_graph_and_eager_modes
+ def testNonDefaultDecay(self):
+ num_training_steps = 1000
+ initial_lr = 1.0
+ for step in range(0, 1500, 250):
+ decayed_lr = learning_rate_decay_v2.linear_cosine_decay(
+ initial_lr,
+ step,
+ num_training_steps,
+ alpha=0.1,
+ beta=1e-4,
+ num_periods=5)
+ expected = self.np_linear_cosine_decay(
+ step, num_training_steps, alpha=0.1, beta=1e-4, num_periods=5)
+ self.assertAllClose(self.evaluate(decayed_lr()), expected, 1e-6)
+
+
+class NoisyLinearCosineDecayTestV2(test_util.TensorFlowTestCase):
+
+ @test_util.run_in_graph_and_eager_modes
+ def testDefaultNoisyLinearCosine(self):
+ num_training_steps = 1000
+ initial_lr = 1.0
+ for step in range(0, 1500, 250):
+ # No numerical check because of noise
+ decayed_lr = learning_rate_decay_v2.noisy_linear_cosine_decay(
+ initial_lr, step, num_training_steps)
+ # Cannot be deterministically tested
+ self.evaluate(decayed_lr())
+
+ @test_util.run_in_graph_and_eager_modes
+ def testNonDefaultNoisyLinearCosine(self):
+ num_training_steps = 1000
+ initial_lr = 1.0
+ for step in range(0, 1500, 250):
+ # No numerical check because of noise
+ decayed_lr = learning_rate_decay_v2.noisy_linear_cosine_decay(
+ initial_lr,
+ step,
+ num_training_steps,
+ initial_variance=0.5,
+ variance_decay=0.1,
+ alpha=0.1,
+ beta=1e-4,
+ num_periods=5)
+ # Cannot be deterministically tested
+ self.evaluate(decayed_lr())
+
+if __name__ == "__main__":
+ googletest.main()
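
Note: the v2 decay tests above rely on the decay functions returning a callable rather than a tensor; the rate for the current step is obtained by invoking that value. A minimal sketch of the calling convention, mirroring the test setup (illustrative only, not part of the patch):

    from tensorflow.python.ops import resource_variable_ops
    from tensorflow.python.training import learning_rate_decay_v2

    step = resource_variable_ops.ResourceVariable(0)
    # natural_exp_decay(initial_lr, step, decay_steps, decay_rate) returns a
    # callable; invoking it yields the decayed rate for the current step value.
    lr_fn = learning_rate_decay_v2.natural_exp_decay(0.1, step, 10, 0.96)
    current_lr = lr_fn()
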
diff --git a/tensorflow/python/training/warm_starting_util.py b/tensorflow/python/training/warm_starting_util.py
index c0dd46bfa5..bea9bb6dff 100644
--- a/tensorflow/python/training/warm_starting_util.py
+++ b/tensorflow/python/training/warm_starting_util.py
@@ -41,6 +41,7 @@ class VocabInfo(
"old_vocab",
"old_vocab_size",
"backup_initializer",
+ "axis",
])):
"""Vocabulary information for warm-starting.
@@ -62,6 +63,42 @@ class VocabInfo(
backup_initializer: [Optional] A variable initializer used for variables
corresponding to new vocabulary entries and OOV. If not provided, these
entries will be zero-initialized.
+ axis: [Optional] Denotes which axis the vocabulary corresponds to. The
+ default, 0, corresponds to the most common use case (embeddings or
+ linear weights for binary classification / regression). An axis of 1
+ could be used for warm-starting output layers with class vocabularies.
+
+ For example:
+
+ embeddings_vocab_info = tf.VocabInfo(
+ new_vocab='embeddings_vocab',
+ new_vocab_size=100,
+ num_oov_buckets=1,
+ old_vocab='pretrained_embeddings_vocab',
+ old_vocab_size=10000,
+ backup_initializer=tf.truncated_normal_initializer(
+ mean=0.0, stddev=(1 / math.sqrt(embedding_dim))),
+ axis=0)
+
+ softmax_output_layer_kernel_vocab_info = tf.VocabInfo(
+ new_vocab='class_vocab',
+ new_vocab_size=5,
+ num_oov_buckets=0, # No OOV for classes.
+ old_vocab='old_class_vocab',
+ old_vocab_size=8,
+ backup_initializer=tf.glorot_uniform_initializer(),
+ axis=1)
+
+ softmax_output_layer_bias_vocab_info = tf.VocabInfo(
+ new_vocab='class_vocab',
+ new_vocab_size=5,
+ num_oov_buckets=0, # No OOV for classes.
+ old_vocab='old_class_vocab',
+ old_vocab_size=8,
+ backup_initializer=tf.zeros_initializer(),
+ axis=0)
+
+ Currently, only axis=0 and axis=1 are supported.
"""
def __new__(cls,
@@ -70,7 +107,12 @@ class VocabInfo(
num_oov_buckets,
old_vocab,
old_vocab_size=-1,
- backup_initializer=None):
+ backup_initializer=None,
+ axis=0):
+ if axis != 0 and axis != 1:
+ raise ValueError("The only supported values for the axis argument are 0 "
+ "and 1. Provided axis: {}".format(axis))
+
return super(VocabInfo, cls).__new__(
cls,
new_vocab,
@@ -79,6 +121,7 @@ class VocabInfo(
old_vocab,
old_vocab_size,
backup_initializer,
+ axis,
)
@@ -149,7 +192,8 @@ def _warm_start_var_with_vocab(var,
previous_vocab_size=-1,
current_oov_buckets=0,
prev_tensor_name=None,
- initializer=None):
+ initializer=None,
+ axis=0):
"""Warm-starts given variable from `prev_tensor_name` tensor in `prev_ckpt`.
Use this method when the `var` is backed by vocabulary. This method stitches
@@ -180,6 +224,7 @@ def _warm_start_var_with_vocab(var,
None, we lookup tensor with same name as given `var`.
initializer: Variable initializer to be used for missing entries. If None,
missing entries will be zero-initialized.
+ axis: Axis of the variable that the provided vocabulary corresponds to.
Raises:
ValueError: If required args are not provided.
@@ -204,6 +249,8 @@ def _warm_start_var_with_vocab(var,
# Assume tensor name remains the same.
prev_tensor_name = _infer_var_name(var)
+ # TODO(eddz): Fix functionality for rank-1 Variables (like FC biases).
+ total_v_first_axis = sum([v.get_shape().as_list()[0] for v in var])
for v in var:
v_shape = v.get_shape().as_list()
slice_info = v._get_save_slice_info()
@@ -213,19 +260,45 @@ def _warm_start_var_with_vocab(var,
full_shape=slice_info.full_shape,
var_offset=slice_info.var_offset)
- # TODO(eddz): Support cases where class vocabularies need remapping too.
+ if axis == 0:
+ new_row_vocab_size = current_vocab_size
+ new_col_vocab_size = v_shape[1]
+ old_row_vocab_size = previous_vocab_size
+ old_row_vocab_file = prev_vocab_path
+ new_row_vocab_file = current_vocab_path
+ old_col_vocab_file = None
+ new_col_vocab_file = None
+ num_row_oov_buckets = current_oov_buckets
+ num_col_oov_buckets = 0
+ elif axis == 1:
+ # Note that we must compute this value across all partitions, whereas
+ # in the axis = 0 case, we can simply use v_shape[1] because we don't
+ # allow partitioning across axis = 1.
+ new_row_vocab_size = total_v_first_axis
+ new_col_vocab_size = current_vocab_size
+ old_row_vocab_size = -1
+ old_row_vocab_file = None
+ new_row_vocab_file = None
+ old_col_vocab_file = prev_vocab_path
+ new_col_vocab_file = current_vocab_path
+ num_row_oov_buckets = 0
+ num_col_oov_buckets = current_oov_buckets
+ else:
+ raise ValueError("The only supported values for the axis argument are 0 "
+ "and 1. Provided axis: {}".format(axis))
+
init = checkpoint_ops._load_and_remap_matrix_initializer(
ckpt_path=checkpoint_utils._get_checkpoint_filename(prev_ckpt),
old_tensor_name=prev_tensor_name,
- new_row_vocab_size=current_vocab_size,
- new_col_vocab_size=v_shape[1],
- old_row_vocab_size=previous_vocab_size,
- old_row_vocab_file=prev_vocab_path,
- new_row_vocab_file=current_vocab_path,
- old_col_vocab_file=None,
- new_col_vocab_file=None,
- num_row_oov_buckets=current_oov_buckets,
- num_col_oov_buckets=0,
+ new_row_vocab_size=new_row_vocab_size,
+ new_col_vocab_size=new_col_vocab_size,
+ old_row_vocab_size=old_row_vocab_size,
+ old_row_vocab_file=old_row_vocab_file,
+ new_row_vocab_file=new_row_vocab_file,
+ old_col_vocab_file=old_col_vocab_file,
+ new_col_vocab_file=new_col_vocab_file,
+ num_row_oov_buckets=num_row_oov_buckets,
+ num_col_oov_buckets=num_col_oov_buckets,
initializer=initializer)
new_init_val = ops.convert_to_tensor(
init(shape=v_shape, partition_info=partition_info))
@@ -374,7 +447,8 @@ def warm_start(ckpt_to_initialize_from,
previous_vocab_size=vocab_info.old_vocab_size,
current_oov_buckets=vocab_info.num_oov_buckets,
prev_tensor_name=prev_var_name,
- initializer=vocab_info.backup_initializer)
+ initializer=vocab_info.backup_initializer,
+ axis=vocab_info.axis)
else:
# For the special value of vars_to_warm_start = None,
# we only warm-start variables with explicitly specified vocabularies.
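
With the axis argument threaded through, a column vocabulary can now be remapped for output-layer kernels in addition to the row vocabularies used for embeddings and linear weights. A hedged end-to-end sketch (variable and checkpoint names are hypothetical; it assumes the tf.train.warm_start / tf.train.VocabInfo API shown above):

    output_vocab_info = tf.train.VocabInfo(
        new_vocab='class_vocab',
        new_vocab_size=5,
        num_oov_buckets=0,           # no OOV buckets for class labels
        old_vocab='old_class_vocab',
        old_vocab_size=8,
        backup_initializer=tf.glorot_uniform_initializer(),
        axis=1)                      # remap along columns (the class axis)
    tf.train.warm_start(
        ckpt_to_initialize_from='/tmp/previous_model',    # hypothetical path
        vars_to_warm_start='output_layer/kernel',         # hypothetical name
        var_name_to_vocab_info={'output_layer/kernel': output_vocab_info})

Internally, the axis=1 branch keeps the full row dimension (summed across partitions) and passes the vocabulary files as the old/new column-vocab arguments to _load_and_remap_matrix_initializer, rather than the row arguments used when axis=0.
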
diff --git a/tensorflow/python/training/warm_starting_util_test.py b/tensorflow/python/training/warm_starting_util_test.py
index 70a84bc3f6..3ee0f6aaa2 100644
--- a/tensorflow/python/training/warm_starting_util_test.py
+++ b/tensorflow/python/training/warm_starting_util_test.py
@@ -107,7 +107,7 @@ class WarmStartingUtilTest(test.TestCase):
"fruit_weights", initializer=[[0.], [0.], [0.], [0.]])
ws_util._warm_start_var(fruit_weights, self.get_temp_dir())
sess.run(variables.global_variables_initializer())
- self.assertAllEqual(prev_val, fruit_weights.eval(sess))
+ self.assertAllClose(prev_val, fruit_weights.eval(sess))
def testWarmStartVarPrevVarPartitioned(self):
_, weights = self._create_prev_run_var(
@@ -123,7 +123,7 @@ class WarmStartingUtilTest(test.TestCase):
"fruit_weights", initializer=[[0.], [0.], [0.], [0.]])
ws_util._warm_start_var(fruit_weights, self.get_temp_dir())
sess.run(variables.global_variables_initializer())
- self.assertAllEqual(prev_val, fruit_weights.eval(sess))
+ self.assertAllClose(prev_val, fruit_weights.eval(sess))
def testWarmStartVarCurrentVarPartitioned(self):
_, prev_val = self._create_prev_run_var(
@@ -143,7 +143,7 @@ class WarmStartingUtilTest(test.TestCase):
fruit_weights = fruit_weights._get_variable_list()
new_val = np.concatenate(
[fruit_weights[0].eval(sess), fruit_weights[1].eval(sess)], axis=0)
- self.assertAllEqual(prev_val, new_val)
+ self.assertAllClose(prev_val, new_val)
def testWarmStartVarBothVarsPartitioned(self):
_, weights = self._create_prev_run_var(
@@ -170,7 +170,7 @@ class WarmStartingUtilTest(test.TestCase):
fruit_weights = fruit_weights._get_variable_list()
new_val = np.concatenate(
[fruit_weights[0].eval(sess), fruit_weights[1].eval(sess)], axis=0)
- self.assertAllEqual(prev_val, new_val)
+ self.assertAllClose(prev_val, new_val)
def testWarmStartVarWithVocab(self):
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
@@ -189,9 +189,34 @@ class WarmStartingUtilTest(test.TestCase):
ws_util._warm_start_var_with_vocab(fruit_weights, new_vocab_path, 5,
self.get_temp_dir(), prev_vocab_path)
sess.run(variables.global_variables_initializer())
- self.assertAllEqual([[2.], [1.5], [1.], [0.5], [0.]],
+ self.assertAllClose([[2.], [1.5], [1.], [0.5], [0.]],
fruit_weights.eval(sess))
+ def testWarmStartVarWithColumnVocab(self):
+ prev_vocab_path = self._write_vocab(["apple", "orange"], "old_vocab")
+ self._create_prev_run_var(
+ "fruit_output_layer",
+ initializer=[[0.5, 0.3], [1., 0.8], [1.5, 1.2], [2., 2.3]])
+
+ # New vocab with elements in reverse order and one new element.
+ new_vocab_path = self._write_vocab(["orange", "apple", "banana"],
+ "new_vocab")
+ # New session and new graph.
+ with ops.Graph().as_default() as g:
+ with self.test_session(graph=g) as sess:
+ fruit_output_layer = variable_scope.get_variable(
+ "fruit_output_layer",
+ initializer=[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.],
+ [0., 0., 0.]])
+ ws_util._warm_start_var_with_vocab(fruit_output_layer, new_vocab_path,
+ current_vocab_size=3,
+ prev_ckpt=self.get_temp_dir(),
+ prev_vocab_path=prev_vocab_path,
+ axis=1)
+ sess.run(variables.global_variables_initializer())
+ self.assertAllClose([[0.3, 0.5, 0.], [0.8, 1.0, 0.], [1.2, 1.5, 0.],
+ [2.3, 2., 0.]], fruit_output_layer.eval(sess))
+
def testWarmStartVarWithVocabConstrainedOldVocabSize(self):
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
@@ -215,7 +240,7 @@ class WarmStartingUtilTest(test.TestCase):
previous_vocab_size=2)
sess.run(variables.global_variables_initializer())
# Old vocabulary limited to ['apple', 'banana'].
- self.assertAllEqual([[0.], [0.], [1.], [0.5], [0.]],
+ self.assertAllClose([[0.], [0.], [1.], [0.5], [0.]],
fruit_weights.eval(sess))
def testWarmStartVarWithVocabPrevVarPartitioned(self):
@@ -238,9 +263,36 @@ class WarmStartingUtilTest(test.TestCase):
ws_util._warm_start_var_with_vocab(fruit_weights, new_vocab_path, 5,
self.get_temp_dir(), prev_vocab_path)
sess.run(variables.global_variables_initializer())
- self.assertAllEqual([[2.], [1.5], [1.], [0.5], [0.]],
+ self.assertAllClose([[2.], [1.5], [1.], [0.5], [0.]],
fruit_weights.eval(sess))
+ def testWarmStartVarWithColumnVocabPrevVarPartitioned(self):
+ prev_vocab_path = self._write_vocab(["apple", "orange"], "old_vocab")
+ self._create_prev_run_var(
+ "fruit_output_layer",
+ shape=[4, 2],
+ initializer=[[0.5, 0.3], [1., 0.8], [1.5, 1.2], [2., 2.3]],
+ partitioner=lambda shape, dtype: [2, 1])
+
+ # New vocab with elements in reverse order and one new element.
+ new_vocab_path = self._write_vocab(["orange", "apple", "banana"],
+ "new_vocab")
+ # New session and new graph.
+ with ops.Graph().as_default() as g:
+ with self.test_session(graph=g) as sess:
+ fruit_output_layer = variable_scope.get_variable(
+ "fruit_output_layer",
+ initializer=[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.],
+ [0., 0., 0.]])
+ ws_util._warm_start_var_with_vocab(fruit_output_layer, new_vocab_path,
+ current_vocab_size=3,
+ prev_ckpt=self.get_temp_dir(),
+ prev_vocab_path=prev_vocab_path,
+ axis=1)
+ sess.run(variables.global_variables_initializer())
+ self.assertAllClose([[0.3, 0.5, 0.], [0.8, 1.0, 0.], [1.2, 1.5, 0.],
+ [2.3, 2., 0.]], fruit_output_layer.eval(sess))
+
def testWarmStartVarWithVocabCurrentVarPartitioned(self):
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
@@ -269,11 +321,43 @@ class WarmStartingUtilTest(test.TestCase):
self.assertTrue(
isinstance(fruit_weights, variables.PartitionedVariable))
fruit_weights_vars = fruit_weights._get_variable_list()
- self.assertAllEqual([[2.], [1.5], [1.]],
+ self.assertAllClose([[2.], [1.5], [1.]],
fruit_weights_vars[0].eval(sess))
- self.assertAllEqual([[0.5], [0.], [0.]],
+ self.assertAllClose([[0.5], [0.], [0.]],
fruit_weights_vars[1].eval(sess))
+ def testWarmStartVarWithColumnVocabCurrentVarPartitioned(self):
+ prev_vocab_path = self._write_vocab(["apple", "orange"], "old_vocab")
+ self._create_prev_run_var(
+ "fruit_output_layer",
+ initializer=[[0.5, 0.3], [1., 0.8], [1.5, 1.2], [2., 2.3]])
+
+ # New vocab with elements in reverse order and one new element.
+ new_vocab_path = self._write_vocab(["orange", "apple", "banana"],
+ "new_vocab")
+ # New session and new graph.
+ with ops.Graph().as_default() as g:
+ with self.test_session(graph=g) as sess:
+ fruit_output_layer = variable_scope.get_variable(
+ "fruit_output_layer",
+ shape=[4, 3],
+ initializer=[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.],
+ [0., 0., 0.]],
+ partitioner=lambda shape, dtype: [2, 1])
+ ws_util._warm_start_var_with_vocab(fruit_output_layer, new_vocab_path,
+ current_vocab_size=3,
+ prev_ckpt=self.get_temp_dir(),
+ prev_vocab_path=prev_vocab_path,
+ axis=1)
+ sess.run(variables.global_variables_initializer())
+ self.assertTrue(
+ isinstance(fruit_output_layer, variables.PartitionedVariable))
+ fruit_output_layer_vars = fruit_output_layer._get_variable_list()
+ self.assertAllClose([[0.3, 0.5, 0.], [0.8, 1.0, 0.]],
+ fruit_output_layer_vars[0].eval(sess))
+ self.assertAllClose([[1.2, 1.5, 0.], [2.3, 2., 0.]],
+ fruit_output_layer_vars[1].eval(sess))
+
def testWarmStartVarWithVocabBothVarsPartitioned(self):
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
@@ -301,11 +385,45 @@ class WarmStartingUtilTest(test.TestCase):
self.assertTrue(
isinstance(fruit_weights, variables.PartitionedVariable))
fruit_weights_vars = fruit_weights._get_variable_list()
- self.assertAllEqual([[2.], [1.5], [1.]],
+ self.assertAllClose([[2.], [1.5], [1.]],
fruit_weights_vars[0].eval(sess))
- self.assertAllEqual([[0.5], [0.], [0.]],
+ self.assertAllClose([[0.5], [0.], [0.]],
fruit_weights_vars[1].eval(sess))
+ def testWarmStartVarWithColumnVocabBothVarsPartitioned(self):
+ prev_vocab_path = self._write_vocab(["apple", "orange"], "old_vocab")
+ self._create_prev_run_var(
+ "fruit_output_layer",
+ shape=[4, 2],
+ initializer=[[0.5, 0.3], [1., 0.8], [1.5, 1.2], [2., 2.3]],
+ partitioner=lambda shape, dtype: [2, 1])
+
+ # New vocab with elements in reverse order and one new element.
+ new_vocab_path = self._write_vocab(["orange", "apple", "banana"],
+ "new_vocab")
+ # New session and new graph.
+ with ops.Graph().as_default() as g:
+ with self.test_session(graph=g) as sess:
+ fruit_output_layer = variable_scope.get_variable(
+ "fruit_output_layer",
+ shape=[4, 3],
+ initializer=[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.],
+ [0., 0., 0.]],
+ partitioner=lambda shape, dtype: [2, 1])
+ ws_util._warm_start_var_with_vocab(fruit_output_layer, new_vocab_path,
+ current_vocab_size=3,
+ prev_ckpt=self.get_temp_dir(),
+ prev_vocab_path=prev_vocab_path,
+ axis=1)
+ sess.run(variables.global_variables_initializer())
+ self.assertTrue(
+ isinstance(fruit_output_layer, variables.PartitionedVariable))
+ fruit_output_layer_vars = fruit_output_layer._get_variable_list()
+ self.assertAllClose([[0.3, 0.5, 0.], [0.8, 1.0, 0.]],
+ fruit_output_layer_vars[0].eval(sess))
+ self.assertAllClose([[1.2, 1.5, 0.], [2.3, 2., 0.]],
+ fruit_output_layer_vars[1].eval(sess))
+
def testWarmStart_ListOfVariables(self):
# Save checkpoint from which to warm-start.
_, prev_int_val = self._create_prev_run_var("v1", shape=[10, 1],
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.estimator.-vocab-info.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-vocab-info.pbtxt
index 5301b94eb3..b6942cb7ed 100644
--- a/tensorflow/tools/api/golden/v1/tensorflow.estimator.-vocab-info.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.estimator.-vocab-info.pbtxt
@@ -4,6 +4,10 @@ tf_class {
is_instance: "<class \'tensorflow.python.training.warm_starting_util.VocabInfo\'>"
is_instance: "<type \'tuple\'>"
member {
+ name: "axis"
+ mtype: "<type \'property\'>"
+ }
+ member {
name: "backup_initializer"
mtype: "<type \'property\'>"
}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.train.-vocab-info.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.train.-vocab-info.pbtxt
index 4ce7cb1111..39b946b82f 100644
--- a/tensorflow/tools/api/golden/v1/tensorflow.train.-vocab-info.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.train.-vocab-info.pbtxt
@@ -4,6 +4,10 @@ tf_class {
is_instance: "<class \'tensorflow.python.training.warm_starting_util.VocabInfo\'>"
is_instance: "<type \'tuple\'>"
member {
+ name: "axis"
+ mtype: "<type \'property\'>"
+ }
+ member {
name: "backup_initializer"
mtype: "<type \'property\'>"
}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-fixed-length-record-reader.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-fixed-length-record-reader.pbtxt
deleted file mode 100644
index 260c796fd6..0000000000
--- a/tensorflow/tools/api/golden/v2/tensorflow.-fixed-length-record-reader.pbtxt
+++ /dev/null
@@ -1,46 +0,0 @@
-path: "tensorflow.FixedLengthRecordReader"
-tf_class {
- is_instance: "<class \'tensorflow.python.ops.io_ops.FixedLengthRecordReader\'>"
- is_instance: "<class \'tensorflow.python.ops.io_ops.ReaderBase\'>"
- is_instance: "<type \'object\'>"
- member {
- name: "reader_ref"
- mtype: "<type \'property\'>"
- }
- member {
- name: "supports_serialize"
- mtype: "<type \'property\'>"
- }
- member_method {
- name: "__init__"
- argspec: "args=[\'self\', \'record_bytes\', \'header_bytes\', \'footer_bytes\', \'hop_bytes\', \'name\', \'encoding\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], "
- }
- member_method {
- name: "num_records_produced"
- argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "num_work_units_completed"
- argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "read"
- argspec: "args=[\'self\', \'queue\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "read_up_to"
- argspec: "args=[\'self\', \'queue\', \'num_records\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "reset"
- argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "restore_state"
- argspec: "args=[\'self\', \'state\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "serialize_state"
- argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
-}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-identity-reader.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-identity-reader.pbtxt
deleted file mode 100644
index 2eda320d63..0000000000
--- a/tensorflow/tools/api/golden/v2/tensorflow.-identity-reader.pbtxt
+++ /dev/null
@@ -1,46 +0,0 @@
-path: "tensorflow.IdentityReader"
-tf_class {
- is_instance: "<class \'tensorflow.python.ops.io_ops.IdentityReader\'>"
- is_instance: "<class \'tensorflow.python.ops.io_ops.ReaderBase\'>"
- is_instance: "<type \'object\'>"
- member {
- name: "reader_ref"
- mtype: "<type \'property\'>"
- }
- member {
- name: "supports_serialize"
- mtype: "<type \'property\'>"
- }
- member_method {
- name: "__init__"
- argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "num_records_produced"
- argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "num_work_units_completed"
- argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "read"
- argspec: "args=[\'self\', \'queue\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "read_up_to"
- argspec: "args=[\'self\', \'queue\', \'num_records\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "reset"
- argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "restore_state"
- argspec: "args=[\'self\', \'state\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "serialize_state"
- argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
-}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-l-m-d-b-reader.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-l-m-d-b-reader.pbtxt
deleted file mode 100644
index f9b7e9bbca..0000000000
--- a/tensorflow/tools/api/golden/v2/tensorflow.-l-m-d-b-reader.pbtxt
+++ /dev/null
@@ -1,46 +0,0 @@
-path: "tensorflow.LMDBReader"
-tf_class {
- is_instance: "<class \'tensorflow.python.ops.io_ops.LMDBReader\'>"
- is_instance: "<class \'tensorflow.python.ops.io_ops.ReaderBase\'>"
- is_instance: "<type \'object\'>"
- member {
- name: "reader_ref"
- mtype: "<type \'property\'>"
- }
- member {
- name: "supports_serialize"
- mtype: "<type \'property\'>"
- }
- member_method {
- name: "__init__"
- argspec: "args=[\'self\', \'name\', \'options\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
- }
- member_method {
- name: "num_records_produced"
- argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "num_work_units_completed"
- argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "read"
- argspec: "args=[\'self\', \'queue\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "read_up_to"
- argspec: "args=[\'self\', \'queue\', \'num_records\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "reset"
- argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "restore_state"
- argspec: "args=[\'self\', \'state\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "serialize_state"
- argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
-}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-reader-base.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-reader-base.pbtxt
deleted file mode 100644
index f6a3ce76a1..0000000000
--- a/tensorflow/tools/api/golden/v2/tensorflow.-reader-base.pbtxt
+++ /dev/null
@@ -1,45 +0,0 @@
-path: "tensorflow.ReaderBase"
-tf_class {
- is_instance: "<class \'tensorflow.python.ops.io_ops.ReaderBase\'>"
- is_instance: "<type \'object\'>"
- member {
- name: "reader_ref"
- mtype: "<type \'property\'>"
- }
- member {
- name: "supports_serialize"
- mtype: "<type \'property\'>"
- }
- member_method {
- name: "__init__"
- argspec: "args=[\'self\', \'reader_ref\', \'supports_serialize\'], varargs=None, keywords=None, defaults=[\'False\'], "
- }
- member_method {
- name: "num_records_produced"
- argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "num_work_units_completed"
- argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "read"
- argspec: "args=[\'self\', \'queue\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "read_up_to"
- argspec: "args=[\'self\', \'queue\', \'num_records\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "reset"
- argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "restore_state"
- argspec: "args=[\'self\', \'state\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "serialize_state"
- argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
-}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-t-f-record-reader.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-t-f-record-reader.pbtxt
deleted file mode 100644
index cdf7937391..0000000000
--- a/tensorflow/tools/api/golden/v2/tensorflow.-t-f-record-reader.pbtxt
+++ /dev/null
@@ -1,46 +0,0 @@
-path: "tensorflow.TFRecordReader"
-tf_class {
- is_instance: "<class \'tensorflow.python.ops.io_ops.TFRecordReader\'>"
- is_instance: "<class \'tensorflow.python.ops.io_ops.ReaderBase\'>"
- is_instance: "<type \'object\'>"
- member {
- name: "reader_ref"
- mtype: "<type \'property\'>"
- }
- member {
- name: "supports_serialize"
- mtype: "<type \'property\'>"
- }
- member_method {
- name: "__init__"
- argspec: "args=[\'self\', \'name\', \'options\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
- }
- member_method {
- name: "num_records_produced"
- argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "num_work_units_completed"
- argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "read"
- argspec: "args=[\'self\', \'queue\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "read_up_to"
- argspec: "args=[\'self\', \'queue\', \'num_records\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "reset"
- argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "restore_state"
- argspec: "args=[\'self\', \'state\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "serialize_state"
- argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
-}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-text-line-reader.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-text-line-reader.pbtxt
deleted file mode 100644
index e9779f0762..0000000000
--- a/tensorflow/tools/api/golden/v2/tensorflow.-text-line-reader.pbtxt
+++ /dev/null
@@ -1,46 +0,0 @@
-path: "tensorflow.TextLineReader"
-tf_class {
- is_instance: "<class \'tensorflow.python.ops.io_ops.TextLineReader\'>"
- is_instance: "<class \'tensorflow.python.ops.io_ops.ReaderBase\'>"
- is_instance: "<type \'object\'>"
- member {
- name: "reader_ref"
- mtype: "<type \'property\'>"
- }
- member {
- name: "supports_serialize"
- mtype: "<type \'property\'>"
- }
- member_method {
- name: "__init__"
- argspec: "args=[\'self\', \'skip_header_lines\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
- }
- member_method {
- name: "num_records_produced"
- argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "num_work_units_completed"
- argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "read"
- argspec: "args=[\'self\', \'queue\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "read_up_to"
- argspec: "args=[\'self\', \'queue\', \'num_records\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "reset"
- argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "restore_state"
- argspec: "args=[\'self\', \'state\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "serialize_state"
- argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
-}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.-whole-file-reader.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.-whole-file-reader.pbtxt
deleted file mode 100644
index 4ac759891c..0000000000
--- a/tensorflow/tools/api/golden/v2/tensorflow.-whole-file-reader.pbtxt
+++ /dev/null
@@ -1,46 +0,0 @@
-path: "tensorflow.WholeFileReader"
-tf_class {
- is_instance: "<class \'tensorflow.python.ops.io_ops.WholeFileReader\'>"
- is_instance: "<class \'tensorflow.python.ops.io_ops.ReaderBase\'>"
- is_instance: "<type \'object\'>"
- member {
- name: "reader_ref"
- mtype: "<type \'property\'>"
- }
- member {
- name: "supports_serialize"
- mtype: "<type \'property\'>"
- }
- member_method {
- name: "__init__"
- argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "num_records_produced"
- argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "num_work_units_completed"
- argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "read"
- argspec: "args=[\'self\', \'queue\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "read_up_to"
- argspec: "args=[\'self\', \'queue\', \'num_records\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "reset"
- argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "restore_state"
- argspec: "args=[\'self\', \'state\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
- member_method {
- name: "serialize_state"
- argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
- }
-}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.estimator.-vocab-info.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-vocab-info.pbtxt
index 5301b94eb3..b6942cb7ed 100644
--- a/tensorflow/tools/api/golden/v2/tensorflow.estimator.-vocab-info.pbtxt
+++ b/tensorflow/tools/api/golden/v2/tensorflow.estimator.-vocab-info.pbtxt
@@ -4,6 +4,10 @@ tf_class {
is_instance: "<class \'tensorflow.python.training.warm_starting_util.VocabInfo\'>"
is_instance: "<type \'tuple\'>"
member {
+ name: "axis"
+ mtype: "<type \'property\'>"
+ }
+ member {
name: "backup_initializer"
mtype: "<type \'property\'>"
}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.pbtxt
index 7d45ea22c8..9332e16bf6 100644
--- a/tensorflow/tools/api/golden/v2/tensorflow.pbtxt
+++ b/tensorflow/tools/api/golden/v2/tensorflow.pbtxt
@@ -61,10 +61,6 @@ tf_module {
mtype: "<type \'type\'>"
}
member {
- name: "FixedLengthRecordReader"
- mtype: "<type \'type\'>"
- }
- member {
name: "GIT_VERSION"
mtype: "<type \'str\'>"
}
@@ -109,10 +105,6 @@ tf_module {
mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
}
member {
- name: "IdentityReader"
- mtype: "<type \'type\'>"
- }
- member {
name: "IndexedSlices"
mtype: "<type \'type\'>"
}
@@ -121,10 +113,6 @@ tf_module {
mtype: "<type \'type\'>"
}
member {
- name: "LMDBReader"
- mtype: "<type \'type\'>"
- }
- member {
name: "LogMessage"
mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
}
@@ -177,10 +165,6 @@ tf_module {
mtype: "<type \'type\'>"
}
member {
- name: "ReaderBase"
- mtype: "<type \'type\'>"
- }
- member {
name: "RegisterGradient"
mtype: "<type \'type\'>"
}
@@ -225,10 +209,6 @@ tf_module {
mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
}
member {
- name: "TFRecordReader"
- mtype: "<type \'type\'>"
- }
- member {
name: "Tensor"
mtype: "<type \'type\'>"
}
@@ -245,10 +225,6 @@ tf_module {
mtype: "<type \'type\'>"
}
member {
- name: "TextLineReader"
- mtype: "<type \'type\'>"
- }
- member {
name: "VERSION"
mtype: "<type \'str\'>"
}
@@ -273,10 +249,6 @@ tf_module {
mtype: "<class \'enum.EnumMeta\'>"
}
member {
- name: "WholeFileReader"
- mtype: "<type \'type\'>"
- }
- member {
name: "app"
mtype: "<type \'module\'>"
}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.-vocab-info.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.-vocab-info.pbtxt
index 4ce7cb1111..39b946b82f 100644
--- a/tensorflow/tools/api/golden/v2/tensorflow.train.-vocab-info.pbtxt
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.-vocab-info.pbtxt
@@ -4,6 +4,10 @@ tf_class {
is_instance: "<class \'tensorflow.python.training.warm_starting_util.VocabInfo\'>"
is_instance: "<type \'tuple\'>"
member {
+ name: "axis"
+ mtype: "<type \'property\'>"
+ }
+ member {
name: "backup_initializer"
mtype: "<type \'property\'>"
}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.train.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.train.pbtxt
index c35e254843..b21dabbde7 100644
--- a/tensorflow/tools/api/golden/v2/tensorflow.train.pbtxt
+++ b/tensorflow/tools/api/golden/v2/tensorflow.train.pbtxt
@@ -249,14 +249,6 @@ tf_module {
argspec: "args=[\'supervisor\', \'train_step_fn\', \'args\', \'kwargs\', \'master\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'\'], "
}
member_method {
- name: "batch"
- argspec: "args=[\'tensors\', \'batch_size\', \'num_threads\', \'capacity\', \'enqueue_many\', \'shapes\', \'dynamic_pad\', \'allow_smaller_final_batch\', \'shared_name\', \'name\'], varargs=None, keywords=None, defaults=[\'1\', \'32\', \'False\', \'None\', \'False\', \'False\', \'None\', \'None\'], "
- }
- member_method {
- name: "batch_join"
- argspec: "args=[\'tensors_list\', \'batch_size\', \'capacity\', \'enqueue_many\', \'shapes\', \'dynamic_pad\', \'allow_smaller_final_batch\', \'shared_name\', \'name\'], varargs=None, keywords=None, defaults=[\'32\', \'False\', \'None\', \'False\', \'False\', \'None\', \'None\'], "
- }
- member_method {
name: "checkpoint_exists"
argspec: "args=[\'checkpoint_prefix\'], varargs=None, keywords=None, defaults=None"
}
@@ -317,10 +309,6 @@ tf_module {
argspec: "args=[\'ckpt_dir_or_file\', \'assignment_map\'], varargs=None, keywords=None, defaults=None"
}
member_method {
- name: "input_producer"
- argspec: "args=[\'input_tensor\', \'element_shape\', \'num_epochs\', \'shuffle\', \'seed\', \'capacity\', \'shared_name\', \'summary_name\', \'name\', \'cancel_op\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'True\', \'None\', \'32\', \'None\', \'None\', \'None\', \'None\'], "
- }
- member_method {
name: "inverse_time_decay"
argspec: "args=[\'learning_rate\', \'global_step\', \'decay_steps\', \'decay_rate\', \'staircase\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
}
@@ -329,10 +317,6 @@ tf_module {
argspec: "args=[\'checkpoint_dir\', \'latest_filename\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
- name: "limit_epochs"
- argspec: "args=[\'tensor\', \'num_epochs\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
- }
- member_method {
name: "linear_cosine_decay"
argspec: "args=[\'learning_rate\', \'global_step\', \'decay_steps\', \'num_periods\', \'alpha\', \'beta\', \'name\'], varargs=None, keywords=None, defaults=[\'0.5\', \'0.0\', \'0.001\', \'None\'], "
}
@@ -353,22 +337,6 @@ tf_module {
argspec: "args=[\'pattern\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
- name: "maybe_batch"
- argspec: "args=[\'tensors\', \'keep_input\', \'batch_size\', \'num_threads\', \'capacity\', \'enqueue_many\', \'shapes\', \'dynamic_pad\', \'allow_smaller_final_batch\', \'shared_name\', \'name\'], varargs=None, keywords=None, defaults=[\'1\', \'32\', \'False\', \'None\', \'False\', \'False\', \'None\', \'None\'], "
- }
- member_method {
- name: "maybe_batch_join"
- argspec: "args=[\'tensors_list\', \'keep_input\', \'batch_size\', \'capacity\', \'enqueue_many\', \'shapes\', \'dynamic_pad\', \'allow_smaller_final_batch\', \'shared_name\', \'name\'], varargs=None, keywords=None, defaults=[\'32\', \'False\', \'None\', \'False\', \'False\', \'None\', \'None\'], "
- }
- member_method {
- name: "maybe_shuffle_batch"
- argspec: "args=[\'tensors\', \'batch_size\', \'capacity\', \'min_after_dequeue\', \'keep_input\', \'num_threads\', \'seed\', \'enqueue_many\', \'shapes\', \'allow_smaller_final_batch\', \'shared_name\', \'name\'], varargs=None, keywords=None, defaults=[\'1\', \'None\', \'False\', \'None\', \'False\', \'None\', \'None\'], "
- }
- member_method {
- name: "maybe_shuffle_batch_join"
- argspec: "args=[\'tensors_list\', \'batch_size\', \'capacity\', \'min_after_dequeue\', \'keep_input\', \'seed\', \'enqueue_many\', \'shapes\', \'allow_smaller_final_batch\', \'shared_name\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\', \'False\', \'None\', \'None\'], "
- }
- member_method {
name: "natural_exp_decay"
argspec: "args=[\'learning_rate\', \'global_step\', \'decay_steps\', \'decay_rate\', \'staircase\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
}
@@ -385,10 +353,6 @@ tf_module {
argspec: "args=[\'learning_rate\', \'global_step\', \'decay_steps\', \'end_learning_rate\', \'power\', \'cycle\', \'name\'], varargs=None, keywords=None, defaults=[\'0.0001\', \'1.0\', \'False\', \'None\'], "
}
member_method {
- name: "range_input_producer"
- argspec: "args=[\'limit\', \'num_epochs\', \'shuffle\', \'seed\', \'capacity\', \'shared_name\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'True\', \'None\', \'32\', \'None\', \'None\'], "
- }
- member_method {
name: "remove_checkpoint"
argspec: "args=[\'checkpoint_prefix\', \'checkpoint_format_version\', \'meta_graph_suffix\'], varargs=None, keywords=None, defaults=[\'2\', \'meta\'], "
}
@@ -409,22 +373,6 @@ tf_module {
argspec: "args=[\'weights\', \'l1\', \'l2\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
- name: "shuffle_batch"
- argspec: "args=[\'tensors\', \'batch_size\', \'capacity\', \'min_after_dequeue\', \'num_threads\', \'seed\', \'enqueue_many\', \'shapes\', \'allow_smaller_final_batch\', \'shared_name\', \'name\'], varargs=None, keywords=None, defaults=[\'1\', \'None\', \'False\', \'None\', \'False\', \'None\', \'None\'], "
- }
- member_method {
- name: "shuffle_batch_join"
- argspec: "args=[\'tensors_list\', \'batch_size\', \'capacity\', \'min_after_dequeue\', \'seed\', \'enqueue_many\', \'shapes\', \'allow_smaller_final_batch\', \'shared_name\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\', \'False\', \'None\', \'None\'], "
- }
- member_method {
- name: "slice_input_producer"
- argspec: "args=[\'tensor_list\', \'num_epochs\', \'shuffle\', \'seed\', \'capacity\', \'shared_name\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'True\', \'None\', \'32\', \'None\', \'None\'], "
- }
- member_method {
- name: "string_input_producer"
- argspec: "args=[\'string_tensor\', \'num_epochs\', \'shuffle\', \'seed\', \'capacity\', \'shared_name\', \'name\', \'cancel_op\'], varargs=None, keywords=None, defaults=[\'None\', \'True\', \'None\', \'32\', \'None\', \'None\', \'None\'], "
- }
- member_method {
name: "summary_iterator"
argspec: "args=[\'path\'], varargs=None, keywords=None, defaults=None"
}
diff --git a/tensorflow/tools/compatibility/renames_v2.py b/tensorflow/tools/compatibility/renames_v2.py
index 216aa41b60..7e66ad816a 100644
--- a/tensorflow/tools/compatibility/renames_v2.py
+++ b/tensorflow/tools/compatibility/renames_v2.py
@@ -65,6 +65,7 @@ renames = {
'tf.fft': 'tf.spectral.fft',
'tf.floor': 'tf.math.floor',
'tf.gather_nd': 'tf.manip.gather_nd',
+ 'tf.GraphKeys.VARIABLES': 'tf.GraphKeys.GLOBAL_VARIABLES',
'tf.greater': 'tf.math.greater',
'tf.greater_equal': 'tf.math.greater_equal',
'tf.ifft': 'tf.spectral.ifft',
diff --git a/tensorflow/tools/compatibility/tf_upgrade_v2.py b/tensorflow/tools/compatibility/tf_upgrade_v2.py
index 9702430a12..38216ce9b1 100644
--- a/tensorflow/tools/compatibility/tf_upgrade_v2.py
+++ b/tensorflow/tools/compatibility/tf_upgrade_v2.py
@@ -19,6 +19,7 @@ from __future__ import division
from __future__ import print_function
import argparse
+import functools
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import renames_v2
@@ -45,6 +46,29 @@ class TFAPIChangeSpec(ast_edits.APIChangeSpec):
# Specially handled functions.
self.function_handle = {}
+ for decay in ["tf.train.exponential_decay", "tf.train.piecewise_constant",
+ "tf.train.polynomial_decay", "tf.train.natural_exp_decay",
+ "tf.train.inverse_time_decay", "tf.train.cosine_decay",
+ "tf.train.cosine_decay_restarts",
+ "tf.train.linear_cosine_decay",
+ "tf.train.noisy_linear_cosine_decay"]:
+ self.function_handle[decay] = functools.partial(
+ self._learning_rate_decay_handler, decay_name=decay)
+
+ @staticmethod
+ def _learning_rate_decay_handler(file_edit_recorder, node, decay_name):
+ comment = ("ERROR: %s has been changed to return a callable instead of a "
+ "tensor when graph building, but its functionality remains "
+ "unchanged during eager execution (returns a callable like "
+ "before). The converter cannot detect and fix this reliably, so "
+ "you need to inspect this usage manually.\n") % decay_name
+ file_edit_recorder.add(
+ comment,
+ node.lineno,
+ node.col_offset,
+ decay_name,
+ decay_name,
+ error="%s requires manual check." % decay_name)
if __name__ == "__main__":
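
For reference, the behavioral change this handler flags: the listed tf.train decay functions return a callable instead of a tensor when graph building (eager behavior is unchanged), so call sites that used the result directly as a tensor need to invoke it. A hedged before/after sketch (graph mode; global_step is an illustrative variable, and natural_exp_decay's argument order is taken from the golden API file above):

    # Before: the decay function returned a Tensor.
    lr = tf.train.natural_exp_decay(0.1, global_step, 1000, 0.96)

    # After: it returns a callable, so the tensor is obtained by calling it.
    lr_fn = tf.train.natural_exp_decay(0.1, global_step, 1000, 0.96)
    lr = lr_fn()
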
diff --git a/tensorflow/tools/compatibility/tf_upgrade_v2_test.py b/tensorflow/tools/compatibility/tf_upgrade_v2_test.py
index 57ac04de06..3886c1e8b9 100644
--- a/tensorflow/tools/compatibility/tf_upgrade_v2_test.py
+++ b/tensorflow/tools/compatibility/tf_upgrade_v2_test.py
@@ -63,6 +63,19 @@ class TestUpgrade(test_util.TensorFlowTestCase):
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.math.rsqrt(tf.math.log(3.8))\n")
+ def testLearningRateDecay(self):
+ for decay in ["tf.train.exponential_decay", "tf.train.piecewise_constant",
+ "tf.train.polynomial_decay", "tf.train.natural_exp_decay",
+ "tf.train.inverse_time_decay", "tf.train.cosine_decay",
+ "tf.train.cosine_decay_restarts",
+ "tf.train.linear_cosine_decay",
+ "tf.train.noisy_linear_cosine_decay"]:
+
+ text = "%s(a, b)\n" % decay
+ _, unused_report, errors, new_text = self._upgrade(text)
+ self.assertEqual(text, new_text)
+ self.assertEqual(errors, ["test.py:1: %s requires manual check." % decay])
+
class TestUpgradeFiles(test_util.TensorFlowTestCase):
diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl
index 742f33f68e..2bf867c7e1 100755
--- a/tensorflow/workspace.bzl
+++ b/tensorflow/workspace.bzl
@@ -240,11 +240,11 @@ def tf_workspace(path_prefix = "", tf_repo_name = ""):
tf_http_archive(
name = "jpeg",
urls = [
- "https://mirror.bazel.build/github.com/libjpeg-turbo/libjpeg-turbo/archive/1.5.3.tar.gz",
- "https://github.com/libjpeg-turbo/libjpeg-turbo/archive/1.5.3.tar.gz",
+ "https://mirror.bazel.build/github.com/libjpeg-turbo/libjpeg-turbo/archive/2.0.0.tar.gz",
+ "https://github.com/libjpeg-turbo/libjpeg-turbo/archive/2.0.0.tar.gz",
],
- sha256 = "1a17020f859cb12711175a67eab5c71fc1904e04b587046218e36106e07eabde",
- strip_prefix = "libjpeg-turbo-1.5.3",
+ sha256 = "f892fff427ab3adffc289363eac26d197ce3ccacefe5f5822377348a8166069b",
+ strip_prefix = "libjpeg-turbo-2.0.0",
build_file = clean_dep("//third_party/jpeg:jpeg.BUILD"),
system_build_file = clean_dep("//third_party/systemlibs:jpeg.BUILD"),
)
diff --git a/third_party/jpeg/jpeg.BUILD b/third_party/jpeg/jpeg.BUILD
index 96e7ac061c..5edf4f8120 100644
--- a/third_party/jpeg/jpeg.BUILD
+++ b/third_party/jpeg/jpeg.BUILD
@@ -144,27 +144,27 @@ cc_library(
"jpeglib.h",
"jsimd.h",
"jsimddct.h",
- "simd/jccolor-altivec.c",
- "simd/jcgray-altivec.c",
- "simd/jcsample.h",
- "simd/jcsample-altivec.c",
- "simd/jdcolor-altivec.c",
- "simd/jdmerge-altivec.c",
- "simd/jdsample-altivec.c",
- "simd/jfdctfst-altivec.c",
- "simd/jfdctint-altivec.c",
- "simd/jidctfst-altivec.c",
- "simd/jidctint-altivec.c",
- "simd/jquanti-altivec.c",
"simd/jsimd.h",
- "simd/jsimd_altivec.h",
- "simd/jsimd_powerpc.c",
+ "simd/powerpc/jccolor-altivec.c",
+ "simd/powerpc/jcgray-altivec.c",
+ "simd/powerpc/jcsample-altivec.c",
+ "simd/powerpc/jdcolor-altivec.c",
+ "simd/powerpc/jdmerge-altivec.c",
+ "simd/powerpc/jdsample-altivec.c",
+ "simd/powerpc/jfdctfst-altivec.c",
+ "simd/powerpc/jfdctint-altivec.c",
+ "simd/powerpc/jidctfst-altivec.c",
+ "simd/powerpc/jidctint-altivec.c",
+ "simd/powerpc/jquanti-altivec.c",
+ "simd/powerpc/jsimd.c",
],
hdrs = [
- "simd/jccolext-altivec.c", # should have been named .inc
- "simd/jcgryext-altivec.c", # should have been named .inc
- "simd/jdcolext-altivec.c", # should have been named .inc
- "simd/jdmrgext-altivec.c", # should have been named .inc
+ "simd/powerpc/jccolext-altivec.c",
+ "simd/powerpc/jcgryext-altivec.c",
+ "simd/powerpc/jdcolext-altivec.c",
+ "simd/powerpc/jdmrgext-altivec.c",
+ "simd/powerpc/jcsample.h",
+ "simd/powerpc/jsimd_altivec.h",
],
copts = libjpegturbo_copts,
nocopts = libjpegturbo_nocopts,
@@ -175,6 +175,7 @@ cc_library(
srcs = [
"jchuff.h",
"jconfig.h",
+ "jconfigint.h",
"jdct.h",
"jerror.h",
"jinclude.h",
@@ -183,24 +184,35 @@ cc_library(
"jpeglib.h",
"jsimd.h",
"jsimddct.h",
- "simd/jccolor-sse2-64.o",
- "simd/jcgray-sse2-64.o",
- "simd/jchuff-sse2-64.o",
- "simd/jcsample-sse2-64.o",
- "simd/jdcolor-sse2-64.o",
- "simd/jdmerge-sse2-64.o",
- "simd/jdsample-sse2-64.o",
- "simd/jfdctflt-sse-64.o",
- "simd/jfdctfst-sse2-64.o",
- "simd/jfdctint-sse2-64.o",
- "simd/jidctflt-sse2-64.o",
- "simd/jidctfst-sse2-64.o",
- "simd/jidctint-sse2-64.o",
- "simd/jidctred-sse2-64.o",
- "simd/jquantf-sse2-64.o",
- "simd/jquanti-sse2-64.o",
"simd/jsimd.h",
- "simd/jsimd_x86_64.c",
+ "simd/x86_64/jsimd.c",
+ "simd/x86_64/jccolor-avx2.o",
+ "simd/x86_64/jccolor-sse2.o",
+ "simd/x86_64/jcgray-avx2.o",
+ "simd/x86_64/jcgray-sse2.o",
+ "simd/x86_64/jchuff-sse2.o",
+ "simd/x86_64/jcphuff-sse2.o",
+ "simd/x86_64/jcsample-avx2.o",
+ "simd/x86_64/jcsample-sse2.o",
+ "simd/x86_64/jdcolor-avx2.o",
+ "simd/x86_64/jdcolor-sse2.o",
+ "simd/x86_64/jdmerge-avx2.o",
+ "simd/x86_64/jdmerge-sse2.o",
+ "simd/x86_64/jdsample-avx2.o",
+ "simd/x86_64/jdsample-sse2.o",
+ "simd/x86_64/jfdctflt-sse.o",
+ "simd/x86_64/jfdctfst-sse2.o",
+ "simd/x86_64/jfdctint-avx2.o",
+ "simd/x86_64/jfdctint-sse2.o",
+ "simd/x86_64/jidctflt-sse2.o",
+ "simd/x86_64/jidctfst-sse2.o",
+ "simd/x86_64/jidctint-avx2.o",
+ "simd/x86_64/jidctint-sse2.o",
+ "simd/x86_64/jidctred-sse2.o",
+ "simd/x86_64/jquantf-sse2.o",
+ "simd/x86_64/jquanti-avx2.o",
+ "simd/x86_64/jquanti-sse2.o",
+ "simd/x86_64/jsimdcpu.o",
],
copts = libjpegturbo_copts,
linkstatic = 1,
@@ -210,57 +222,88 @@ cc_library(
genrule(
name = "simd_x86_64_assemblage23",
srcs = [
- "simd/jccolext-sse2-64.asm",
- "simd/jccolor-sse2-64.asm",
- "simd/jcgray-sse2-64.asm",
- "simd/jcgryext-sse2-64.asm",
- "simd/jchuff-sse2-64.asm",
- "simd/jcolsamp.inc",
- "simd/jcsample-sse2-64.asm",
- "simd/jdcolext-sse2-64.asm",
- "simd/jdcolor-sse2-64.asm",
- "simd/jdct.inc",
- "simd/jdmerge-sse2-64.asm",
- "simd/jdmrgext-sse2-64.asm",
- "simd/jdsample-sse2-64.asm",
- "simd/jfdctflt-sse-64.asm",
- "simd/jfdctfst-sse2-64.asm",
- "simd/jfdctint-sse2-64.asm",
- "simd/jidctflt-sse2-64.asm",
- "simd/jidctfst-sse2-64.asm",
- "simd/jidctint-sse2-64.asm",
- "simd/jidctred-sse2-64.asm",
- "simd/jpeg_nbits_table.inc",
- "simd/jquantf-sse2-64.asm",
- "simd/jquanti-sse2-64.asm",
- "simd/jsimdcfg.inc",
- "simd/jsimdext.inc",
+ "jconfig.h",
+ "jconfigint.h",
+ "simd/x86_64/jccolext-avx2.asm",
+ "simd/x86_64/jccolext-sse2.asm",
+ "simd/x86_64/jccolor-avx2.asm",
+ "simd/x86_64/jccolor-sse2.asm",
+ "simd/x86_64/jcgray-avx2.asm",
+ "simd/x86_64/jcgray-sse2.asm",
+ "simd/x86_64/jcgryext-avx2.asm",
+ "simd/x86_64/jcgryext-sse2.asm",
+ "simd/x86_64/jchuff-sse2.asm",
+ "simd/x86_64/jcphuff-sse2.asm",
+ "simd/x86_64/jcsample-avx2.asm",
+ "simd/x86_64/jcsample-sse2.asm",
+ "simd/x86_64/jdcolext-avx2.asm",
+ "simd/x86_64/jdcolext-sse2.asm",
+ "simd/x86_64/jdcolor-avx2.asm",
+ "simd/x86_64/jdcolor-sse2.asm",
+ "simd/x86_64/jdmerge-avx2.asm",
+ "simd/x86_64/jdmerge-sse2.asm",
+ "simd/x86_64/jdmrgext-avx2.asm",
+ "simd/x86_64/jdmrgext-sse2.asm",
+ "simd/x86_64/jdsample-avx2.asm",
+ "simd/x86_64/jdsample-sse2.asm",
+ "simd/x86_64/jfdctflt-sse.asm",
+ "simd/x86_64/jfdctfst-sse2.asm",
+ "simd/x86_64/jfdctint-avx2.asm",
+ "simd/x86_64/jfdctint-sse2.asm",
+ "simd/x86_64/jidctflt-sse2.asm",
+ "simd/x86_64/jidctfst-sse2.asm",
+ "simd/x86_64/jidctint-avx2.asm",
+ "simd/x86_64/jidctint-sse2.asm",
+ "simd/x86_64/jidctred-sse2.asm",
+ "simd/x86_64/jquantf-sse2.asm",
+ "simd/x86_64/jquanti-avx2.asm",
+ "simd/x86_64/jquanti-sse2.asm",
+ "simd/x86_64/jsimdcpu.asm",
+ "simd/nasm/jcolsamp.inc",
+ "simd/nasm/jdct.inc",
+ "simd/nasm/jpeg_nbits_table.inc",
+ "simd/nasm/jsimdcfg.inc",
+ "simd/nasm/jsimdcfg.inc.h",
+ "simd/nasm/jsimdext.inc",
],
outs = [
- "simd/jccolor-sse2-64.o",
- "simd/jcgray-sse2-64.o",
- "simd/jchuff-sse2-64.o",
- "simd/jcsample-sse2-64.o",
- "simd/jdcolor-sse2-64.o",
- "simd/jdmerge-sse2-64.o",
- "simd/jdsample-sse2-64.o",
- "simd/jfdctflt-sse-64.o",
- "simd/jfdctfst-sse2-64.o",
- "simd/jfdctint-sse2-64.o",
- "simd/jidctflt-sse2-64.o",
- "simd/jidctfst-sse2-64.o",
- "simd/jidctint-sse2-64.o",
- "simd/jidctred-sse2-64.o",
- "simd/jquantf-sse2-64.o",
- "simd/jquanti-sse2-64.o",
+ "simd/x86_64/jccolor-avx2.o",
+ "simd/x86_64/jccolor-sse2.o",
+ "simd/x86_64/jcgray-avx2.o",
+ "simd/x86_64/jcgray-sse2.o",
+ "simd/x86_64/jchuff-sse2.o",
+ "simd/x86_64/jcphuff-sse2.o",
+ "simd/x86_64/jcsample-avx2.o",
+ "simd/x86_64/jcsample-sse2.o",
+ "simd/x86_64/jdcolor-avx2.o",
+ "simd/x86_64/jdcolor-sse2.o",
+ "simd/x86_64/jdmerge-avx2.o",
+ "simd/x86_64/jdmerge-sse2.o",
+ "simd/x86_64/jdsample-avx2.o",
+ "simd/x86_64/jdsample-sse2.o",
+ "simd/x86_64/jfdctflt-sse.o",
+ "simd/x86_64/jfdctfst-sse2.o",
+ "simd/x86_64/jfdctint-avx2.o",
+ "simd/x86_64/jfdctint-sse2.o",
+ "simd/x86_64/jidctflt-sse2.o",
+ "simd/x86_64/jidctfst-sse2.o",
+ "simd/x86_64/jidctint-avx2.o",
+ "simd/x86_64/jidctint-sse2.o",
+ "simd/x86_64/jidctred-sse2.o",
+ "simd/x86_64/jquantf-sse2.o",
+ "simd/x86_64/jquanti-avx2.o",
+ "simd/x86_64/jquanti-sse2.o",
+ "simd/x86_64/jsimdcpu.o",
],
cmd = "for out in $(OUTS); do\n" +
" $(location @nasm//:nasm) -f elf64" +
- " -DELF -DPIC -DRGBX_FILLER_0XFF -D__x86_64__ -DARCH_X86_64" +
- " -I $$(dirname $(location simd/jdct.inc))/" +
- " -I $$(dirname $(location simd/jsimdcfg.inc))/" +
+ " -DELF -DPIC -D__x86_64__" +
+ " -I $$(dirname $(location jconfig.h))/" +
+ " -I $$(dirname $(location jconfigint.h))/" +
+ " -I $$(dirname $(location simd/nasm/jsimdcfg.inc.h))/" +
+ " -I $$(dirname $(location simd/x86_64/jccolext-sse2.asm))/" +
" -o $$out" +
- " $$(dirname $(location simd/jdct.inc))/$$(basename $${out%.o}.asm)\n" +
+ " $$(dirname $(location simd/x86_64/jccolext-sse2.asm))/$$(basename $${out%.o}.asm)\n" +
"done",
tools = ["@nasm"],
)
@@ -279,8 +322,8 @@ cc_library(
"jsimd.h",
"jsimddct.h",
"simd/jsimd.h",
- "simd/jsimd_arm.c",
- "simd/jsimd_arm_neon.S",
+ "simd/arm/jsimd.c",
+ "simd/arm/jsimd_neon.S",
],
copts = libjpegturbo_copts,
nocopts = libjpegturbo_nocopts,
@@ -300,8 +343,8 @@ cc_library(
"jsimd.h",
"jsimddct.h",
"simd/jsimd.h",
- "simd/jsimd_arm64.c",
- "simd/jsimd_arm64_neon.S",
+ "simd/arm64/jsimd.c",
+ "simd/arm64/jsimd_neon.S",
],
copts = libjpegturbo_copts,
nocopts = libjpegturbo_nocopts,
@@ -332,50 +375,44 @@ template_rule(
out = "jconfig_win.h",
substitutions = {
"@JPEG_LIB_VERSION@": "62",
- "@VERSION@": "1.5.1",
- "@LIBJPEG_TURBO_VERSION_NUMBER@": "1005001",
- "cmakedefine": "define",
+ "@VERSION@": "2.0.0",
+ "@LIBJPEG_TURBO_VERSION_NUMBER@": "2000000",
"@BITS_IN_JSAMPLE@": "8",
- },
-)
-
-template_rule(
- name = "jconfigint_win",
- src = "win/jconfigint.h.in",
- out = "jconfigint_win.h",
- substitutions = {
- "@VERSION@": "1.5.1",
- "@BUILD@": "20161115",
- "@CMAKE_PROJECT_NAME@": "libjpeg-turbo",
+ "#cmakedefine C_ARITH_CODING_SUPPORTED": "#define C_ARITH_CODING_SUPPORTED",
+ "#cmakedefine D_ARITH_CODING_SUPPORTED": "#define D_ARITH_CODING_SUPPORTED",
+ "#cmakedefine MEM_SRCDST_SUPPORTED": "#define MEM_SRCDST_SUPPORTED",
+ "#cmakedefine WITH_SIMD": "",
},
)
JCONFIG_NOWIN_COMMON_SUBSTITUTIONS = {
- "LIBJPEG_TURBO_VERSION 0": "LIBJPEG_TURBO_VERSION 1.5.1",
- "LIBJPEG_TURBO_VERSION_NUMBER 0": "LIBJPEG_TURBO_VERSION_NUMBER 1005001",
- "#undef C_ARITH_CODING_SUPPORTED": "#define C_ARITH_CODING_SUPPORTED 1",
- "#undef D_ARITH_CODING_SUPPORTED": "#define D_ARITH_CODING_SUPPORTED 1",
- "#undef HAVE_LOCALE_H": "#define HAVE_LOCALE_H 1",
- "#undef HAVE_STDDEF_H": "#define HAVE_STDDEF_H 1",
- "#undef HAVE_STDLIB_H": "#define HAVE_STDLIB_H 1",
- "#undef HAVE_UNSIGNED_CHAR": "#define HAVE_UNSIGNED_CHAR 1",
- "#undef HAVE_UNSIGNED_SHORT": "#define HAVE_UNSIGNED_SHORT 1",
- "#undef INCOMPLETE_TYPES_BROKEN": "",
- "#undef MEM_SRCDST_SUPPORTED": "#define MEM_SRCDST_SUPPORTED 1",
- "#undef NEED_BSD_STRINGS": "",
- "#undef NEED_SYS_TYPES_H": "#define NEED_SYS_TYPES_H 1",
- "#undef __CHAR_UNSIGNED__": "",
+ "@JPEG_LIB_VERSION@": "62",
+ "@VERSION@": "2.0.0",
+ "@LIBJPEG_TURBO_VERSION_NUMBER@": "2000000",
+ "#cmakedefine C_ARITH_CODING_SUPPORTED": "#define C_ARITH_CODING_SUPPORTED",
+ "#cmakedefine D_ARITH_CODING_SUPPORTED": "#define D_ARITH_CODING_SUPPORTED",
+ "#cmakedefine MEM_SRCDST_SUPPORTED": "#define MEM_SRCDST_SUPPORTED",
+ "@BITS_IN_JSAMPLE@": "8",
+ "#cmakedefine HAVE_LOCALE_H": "#define HAVE_LOCALE_H 1",
+ "#cmakedefine HAVE_STDDEF_H": "#define HAVE_STDDEF_H 1",
+ "#cmakedefine HAVE_STDLIB_H": "#define HAVE_STDLIB_H 1",
+ "#cmakedefine NEED_SYS_TYPES_H": "#define NEED_SYS_TYPES_H",
+ "#cmakedefine NEED_BSD_STRINGS": "",
+ "#cmakedefine HAVE_UNSIGNED_CHAR": "#define HAVE_UNSIGNED_CHAR 1",
+ "#cmakedefine HAVE_UNSIGNED_SHORT": "#define HAVE_UNSIGNED_SHORT 1",
+ "#cmakedefine INCOMPLETE_TYPES_BROKEN": "",
+ "#cmakedefine RIGHT_SHIFT_IS_UNSIGNED": "",
+ "#cmakedefine __CHAR_UNSIGNED__": "",
"#undef const": "",
"#undef size_t": "",
- "#undef RIGHT_SHIFT_IS_UNSIGNED": "",
}
JCONFIG_NOWIN_SIMD_SUBSTITUTIONS = {
- "#undef WITH_SIMD": "#define WITH_SIMD 1",
+ "#cmakedefine WITH_SIMD": "#define WITH_SIMD",
}
JCONFIG_NOWIN_NOSIMD_SUBSTITUTIONS = {
- "#undef WITH_SIMD": "",
+ "#cmakedefine WITH_SIMD": "",
}
JCONFIG_NOWIN_SIMD_SUBSTITUTIONS.update(JCONFIG_NOWIN_COMMON_SUBSTITUTIONS)
@@ -396,22 +433,55 @@ template_rule(
substitutions = JCONFIG_NOWIN_SIMD_SUBSTITUTIONS,
)
+JCONFIGINT_COMMON_SUBSTITUTIONS = {
+ "@BUILD@": "20180831",
+ "@VERSION@": "2.0.0",
+ "@CMAKE_PROJECT_NAME@": "libjpeg-turbo",
+ "#undef inline": "",
+ "#cmakedefine HAVE_INTRIN_H": "",
+}
+
+JCONFIGINT_NOWIN_SUBSTITUTIONS = {
+ "#cmakedefine HAVE_BUILTIN_CTZL": "#define HAVE_BUILTIN_CTZL",
+ "@INLINE@": "inline __attribute__((always_inline))",
+ "#define SIZEOF_SIZE_T @SIZE_T@": "#if (__WORDSIZE==64 && !defined(__native_client__))\n" +
+ "#define SIZEOF_SIZE_T 8\n" +
+ "#else\n" +
+ "#define SIZEOF_SIZE_T 4\n" +
+ "#endif\n",
+}
+
+JCONFIGINT_WIN_SUBSTITUTIONS = {
+ "#cmakedefine HAVE_BUILTIN_CTZL": "",
+ "#define INLINE @INLINE@": "#if defined(__GNUC__)\n" +
+ "#define INLINE inline __attribute__((always_inline))\n" +
+ "#elif defined(_MSC_VER)\n" +
+ "#define INLINE __forceinline\n" +
+ "#else\n" +
+ "#define INLINE\n" +
+ "#endif\n",
+ "#define SIZEOF_SIZE_T @SIZE_T@": "#if (__WORDSIZE==64)\n" +
+ "#define SIZEOF_SIZE_T 8\n" +
+ "#else\n" +
+ "#define SIZEOF_SIZE_T 4\n" +
+ "#endif\n",
+}
+
+JCONFIGINT_NOWIN_SUBSTITUTIONS.update(JCONFIGINT_COMMON_SUBSTITUTIONS)
+JCONFIGINT_WIN_SUBSTITUTIONS.update(JCONFIGINT_COMMON_SUBSTITUTIONS)
+
template_rule(
name = "jconfigint_nowin",
src = "jconfigint.h.in",
out = "jconfigint_nowin.h",
- substitutions = {
- "#undef BUILD": "#define BUILD \"20161115\"",
- "#undef inline": "",
- "#undef INLINE": "#define INLINE inline __attribute__((always_inline))",
- "#undef PACKAGE_NAME": "#define PACKAGE_NAME \"libjpeg-turbo\"",
- "#undef VERSION": "#define VERSION \"1.5.1\"",
- "#undef SIZEOF_SIZE_T": "#if (__WORDSIZE==64 && !defined(__native_client__))\n" +
- "#define SIZEOF_SIZE_T 8\n" +
- "#else\n" +
- "#define SIZEOF_SIZE_T 4\n" +
- "#endif\n",
- },
+ substitutions = JCONFIGINT_NOWIN_SUBSTITUTIONS,
+)
+
+template_rule(
+ name = "jconfigint_win",
+ src = "jconfigint.h.in",
+ out = "jconfigint_win.h",
+ substitutions = JCONFIGINT_WIN_SUBSTITUTIONS,
)
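
To make the effect of these substitution tables concrete, a small Python sketch (illustrative only; the actual expansion is done by Bazel's template_rule, and the template lines shown are hypothetical) of how a cmake-style line in jconfigint.h.in becomes a plain preprocessor define on non-Windows builds:

    # Mimics the literal string replacement that the template expansion performs.
    substitutions = {
        "#cmakedefine HAVE_BUILTIN_CTZL": "#define HAVE_BUILTIN_CTZL",
        "@BUILD@": "20180831",
        "@VERSION@": "2.0.0",
    }
    template = '#cmakedefine HAVE_BUILTIN_CTZL\n#define BUILD "@BUILD@"\n'
    generated = template
    for old, new in substitutions.items():
        generated = generated.replace(old, new)
    # generated now reads:
    #   #define HAVE_BUILTIN_CTZL
    #   #define BUILD "20180831"
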
genrule(