author     Tim Shen <timshen@google.com>                    2018-08-30 16:03:10 -0700
committer  TensorFlower Gardener <gardener@tensorflow.org>  2018-08-30 16:07:27 -0700
commit     6f879f891abe2e267c5cf512d034d7c3641cfdb0 (patch)
tree       33dfda2aa13bdec06d3aa330dd5816441d449fa7
parent     5d5591fbd4624ff7e50f305464667315f2d41ebb (diff)
[XLA] Rename all (Mutable)ArraySlice to absl::Span.
PiperOrigin-RevId: 210998142
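
The rename is mechanical: read-only gtl::ArraySlice<T> parameters become absl::Span<const T>, gtl::MutableArraySlice<T> becomes absl::Span<T>, and a slice of pointers such as gtl::ArraySlice<Predicate*> becomes absl::Span<Predicate* const>, where the const binds to the pointer elements rather than the pointees. A minimal standalone sketch of these three spellings follows; the helper functions in it are hypothetical and not taken from this change, and only the absl::Span parameter types mirror what the diff introduces.

// Minimal sketch (not part of this commit) of the absl::Span spellings
// introduced by the rename.
#include <cstdint>
#include <iostream>
#include <vector>

#include "absl/types/span.h"

struct Predicate { int id; };

// Was: gtl::ArraySlice<int64> dims -- a read-only, non-owning view.
int64_t ElementCount(absl::Span<const int64_t> dims) {
  int64_t count = 1;
  for (int64_t d : dims) count *= d;
  return count;
}

// Was: gtl::MutableArraySlice<int64> dims -- elements may be written through.
void FillWithZeros(absl::Span<int64_t> dims) {
  for (int64_t& d : dims) d = 0;
}

// Was: gtl::ArraySlice<Predicate*> -- the const applies to the pointer
// elements, so the callee can read the pointees but cannot reseat the
// pointers in the underlying container.
int SumIds(absl::Span<Predicate* const> preds) {
  int sum = 0;
  for (Predicate* p : preds) sum += p->id;
  return sum;
}

int main() {
  std::vector<int64_t> dims = {2, 3, 4};
  std::cout << ElementCount(dims) << "\n";  // A vector converts implicitly.

  FillWithZeros(absl::MakeSpan(dims));      // Mutable view via MakeSpan.

  Predicate a{1}, b{2};
  std::vector<Predicate*> preds = {&a, &b};
  std::cout << SumIds(preds) << "\n";
  return 0;
}

Because a span does not own its storage, callers must keep the backing container alive across the call; that is why classes in the diff (for example AndPredicate in deadness_analysis.cc) keep a std::vector<Predicate*> member and expose it only as an absl::Span<Predicate* const> view.
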
-rw-r--r-- tensorflow/compiler/aot/embedded_protocol_buffers.cc | 2
-rw-r--r-- tensorflow/compiler/aot/embedded_protocol_buffers.h | 2
-rw-r--r-- tensorflow/compiler/jit/deadness_analysis.cc | 50
-rw-r--r-- tensorflow/compiler/jit/deadness_analysis_internal.h | 2
-rw-r--r-- tensorflow/compiler/jit/encapsulate_subgraphs_pass_test.cc | 145
-rw-r--r-- tensorflow/compiler/jit/partially_decluster_pass.cc | 2
-rw-r--r-- tensorflow/compiler/tests/randomized_tests.cc | 18
-rw-r--r-- tensorflow/compiler/tf2xla/functionalize_control_flow_test.cc | 24
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/batchtospace_op.cc | 4
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/binary_ops.cc | 30
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/cwise_ops.h | 4
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/diag_op.cc | 4
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/image_resize_ops.cc | 6
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/random_ops.cc | 3
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/select_op.cc | 2
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/spacetobatch_op.cc | 4
-rw-r--r-- tensorflow/compiler/tf2xla/kernels/tensor_array_ops.cc | 2
-rw-r--r-- tensorflow/compiler/tf2xla/lib/cholesky.cc | 2
-rw-r--r-- tensorflow/compiler/tf2xla/lib/qr.cc | 12
-rw-r--r-- tensorflow/compiler/tf2xla/lib/scatter.cc | 6
-rw-r--r-- tensorflow/compiler/tf2xla/lib/util.cc | 20
-rw-r--r-- tensorflow/compiler/tf2xla/lib/util.h | 22
-rw-r--r-- tensorflow/compiler/tf2xla/lib/while_loop.cc | 8
-rw-r--r-- tensorflow/compiler/tf2xla/lib/while_loop.h | 10
-rw-r--r-- tensorflow/compiler/tf2xla/literal_util.cc | 5
-rw-r--r-- tensorflow/compiler/tf2xla/literal_util.h | 5
-rw-r--r-- tensorflow/compiler/tf2xla/literal_util_test.cc | 4
-rw-r--r-- tensorflow/compiler/tf2xla/xla_compiler.cc | 12
-rw-r--r-- tensorflow/compiler/tf2xla/xla_compiler.h | 8
-rw-r--r-- tensorflow/compiler/tf2xla/xla_helpers.cc | 2
-rw-r--r-- tensorflow/compiler/tf2xla/xla_helpers.h | 2
-rw-r--r-- tensorflow/compiler/tf2xla/xla_op_kernel.cc | 2
-rw-r--r-- tensorflow/compiler/tf2xla/xla_op_kernel.h | 2
-rw-r--r-- tensorflow/compiler/tf2xla/xla_op_registry.cc | 8
-rw-r--r-- tensorflow/compiler/tf2xla/xla_op_registry.h | 8
-rw-r--r-- tensorflow/compiler/xla/array.h | 28
-rw-r--r-- tensorflow/compiler/xla/array4d_test.cc | 22
-rw-r--r-- tensorflow/compiler/xla/array_test.cc | 2
-rw-r--r-- tensorflow/compiler/xla/client/client.cc | 8
-rw-r--r-- tensorflow/compiler/xla/client/client.h | 6
-rw-r--r-- tensorflow/compiler/xla/client/compile_only_client.cc | 2
-rw-r--r-- tensorflow/compiler/xla/client/compile_only_client.h | 2
-rw-r--r-- tensorflow/compiler/xla/client/executable_build_options.h | 2
-rw-r--r-- tensorflow/compiler/xla/client/lib/math.cc | 3
-rw-r--r-- tensorflow/compiler/xla/client/lib/math.h | 3
-rw-r--r-- tensorflow/compiler/xla/client/lib/numeric.cc | 4
-rw-r--r-- tensorflow/compiler/xla/client/lib/pooling.cc | 56
-rw-r--r-- tensorflow/compiler/xla/client/lib/pooling.h | 29
-rw-r--r-- tensorflow/compiler/xla/client/lib/pooling_test.cc | 6
-rw-r--r-- tensorflow/compiler/xla/client/local_client.cc | 10
-rw-r--r-- tensorflow/compiler/xla/client/local_client.h | 13
-rw-r--r-- tensorflow/compiler/xla/client/padding.cc | 13
-rw-r--r-- tensorflow/compiler/xla/client/padding.h | 13
-rw-r--r-- tensorflow/compiler/xla/client/xla_builder.cc | 401
-rw-r--r-- tensorflow/compiler/xla/client/xla_builder.h | 572
-rw-r--r-- tensorflow/compiler/xla/index_util.cc | 15
-rw-r--r-- tensorflow/compiler/xla/index_util.h | 12
-rw-r--r-- tensorflow/compiler/xla/layout_util.cc | 12
-rw-r--r-- tensorflow/compiler/xla/layout_util.h | 13
-rw-r--r-- tensorflow/compiler/xla/layout_util_test.cc | 6
-rw-r--r-- tensorflow/compiler/xla/literal.cc | 90
-rw-r--r-- tensorflow/compiler/xla/literal.h | 166
-rw-r--r-- tensorflow/compiler/xla/literal_comparison.cc | 33
-rw-r--r-- tensorflow/compiler/xla/literal_test.cc | 23
-rw-r--r-- tensorflow/compiler/xla/literal_util.cc | 14
-rw-r--r-- tensorflow/compiler/xla/literal_util.h | 47
-rw-r--r-- tensorflow/compiler/xla/packed_literal_reader.cc | 2
-rw-r--r-- tensorflow/compiler/xla/python/local_computation_builder.cc | 82
-rw-r--r-- tensorflow/compiler/xla/python/local_computation_builder.h | 64
-rw-r--r-- tensorflow/compiler/xla/python/local_computation_builder.i | 20
-rw-r--r-- tensorflow/compiler/xla/reference_util.cc | 90
-rw-r--r-- tensorflow/compiler/xla/reference_util.h | 58
-rw-r--r-- tensorflow/compiler/xla/service/algebraic_simplifier.cc | 10
-rw-r--r-- tensorflow/compiler/xla/service/algebraic_simplifier_test.cc | 6
-rw-r--r-- tensorflow/compiler/xla/service/backend.cc | 10
-rw-r--r-- tensorflow/compiler/xla/service/backend.h | 2
-rw-r--r-- tensorflow/compiler/xla/service/bfloat16_normalization.cc | 6
-rw-r--r-- tensorflow/compiler/xla/service/bfloat16_propagation.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/buffer_assignment_test.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/compile_only_service.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/compile_only_service.h | 4
-rw-r--r-- tensorflow/compiler/xla/service/copy_insertion.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/cpu/buffer_info_util.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/cpu/buffer_info_util.h | 2
-rw-r--r-- tensorflow/compiler/xla/service/cpu/cpu_executable.cc | 14
-rw-r--r-- tensorflow/compiler/xla/service/cpu/cpu_executable.h | 17
-rw-r--r-- tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.cc | 7
-rw-r--r-- tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.h | 5
-rw-r--r-- tensorflow/compiler/xla/service/cpu/dot_op_emitter.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/cpu/ir_emitter.cc | 24
-rw-r--r-- tensorflow/compiler/xla/service/cpu/ir_emitter.h | 32
-rw-r--r-- tensorflow/compiler/xla/service/cpu/ir_function.cc | 8
-rw-r--r-- tensorflow/compiler/xla/service/cpu/ir_function.h | 8
-rw-r--r-- tensorflow/compiler/xla/service/cpu/vector_support_library.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/cpu/vector_support_library.h | 2
-rw-r--r-- tensorflow/compiler/xla/service/cpu/xfeed_manager.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/cpu/xfeed_manager.h | 3
-rw-r--r-- tensorflow/compiler/xla/service/device_memory_allocator.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/device_memory_allocator.h | 2
-rw-r--r-- tensorflow/compiler/xla/service/elemental_ir_emitter.cc | 6
-rw-r--r-- tensorflow/compiler/xla/service/elemental_ir_emitter_test.cc | 3
-rw-r--r-- tensorflow/compiler/xla/service/executable.cc | 7
-rw-r--r-- tensorflow/compiler/xla/service/executable.h | 13
-rw-r--r-- tensorflow/compiler/xla/service/gather_expander.cc | 5
-rw-r--r-- tensorflow/compiler/xla/service/generic_transfer_manager.cc | 5
-rw-r--r-- tensorflow/compiler/xla/service/generic_transfer_manager.h | 6
-rw-r--r-- tensorflow/compiler/xla/service/gpu/cudnn_convolution_algorithm_picker.cc | 5
-rw-r--r-- tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.cc | 26
-rw-r--r-- tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.h | 30
-rw-r--r-- tensorflow/compiler/xla/service/gpu/fft_thunk.cc | 3
-rw-r--r-- tensorflow/compiler/xla/service/gpu/fft_thunk.h | 2
-rw-r--r-- tensorflow/compiler/xla/service/gpu/gpu_executable.cc | 4
-rw-r--r-- tensorflow/compiler/xla/service/gpu/gpu_executable.h | 4
-rw-r--r-- tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.cc | 4
-rw-r--r-- tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.h | 4
-rw-r--r-- tensorflow/compiler/xla/service/gpu/ir_emission_utils.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/gpu/ir_emission_utils.h | 2
-rw-r--r-- tensorflow/compiler/xla/service/gpu/ir_emitter.cc | 6
-rw-r--r-- tensorflow/compiler/xla/service/gpu/ir_emitter.h | 8
-rw-r--r-- tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc | 68
-rw-r--r-- tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.h | 62
-rw-r--r-- tensorflow/compiler/xla/service/gpu/kernel_thunk.cc | 8
-rw-r--r-- tensorflow/compiler/xla/service/gpu/kernel_thunk.h | 2
-rw-r--r-- tensorflow/compiler/xla/service/gpu/pad_for_tensor_cores.cc | 3
-rw-r--r-- tensorflow/compiler/xla/service/gpu/parallel_loop_emitter.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/gpu/parallel_loop_emitter.h | 9
-rw-r--r-- tensorflow/compiler/xla/service/gpu/tuple_thunk.h | 3
-rw-r--r-- tensorflow/compiler/xla/service/hlo_buffer.h | 2
-rw-r--r-- tensorflow/compiler/xla/service/hlo_computation.cc | 4
-rw-r--r-- tensorflow/compiler/xla/service/hlo_computation.h | 4
-rw-r--r-- tensorflow/compiler/xla/service/hlo_constant_folding_test.cc | 6
-rw-r--r-- tensorflow/compiler/xla/service/hlo_creation_utils.cc | 42
-rw-r--r-- tensorflow/compiler/xla/service/hlo_creation_utils.h | 39
-rw-r--r-- tensorflow/compiler/xla/service/hlo_creation_utils_test.cc | 10
-rw-r--r-- tensorflow/compiler/xla/service/hlo_dataflow_analysis.cc | 5
-rw-r--r-- tensorflow/compiler/xla/service/hlo_dataflow_analysis.h | 2
-rw-r--r-- tensorflow/compiler/xla/service/hlo_evaluator.cc | 86
-rw-r--r-- tensorflow/compiler/xla/service/hlo_evaluator.h | 12
-rw-r--r-- tensorflow/compiler/xla/service/hlo_evaluator_test.cc | 4
-rw-r--r-- tensorflow/compiler/xla/service/hlo_evaluator_typed_visitor.h | 100
-rw-r--r-- tensorflow/compiler/xla/service/hlo_instruction.cc | 60
-rw-r--r-- tensorflow/compiler/xla/service/hlo_instruction.h | 60
-rw-r--r-- tensorflow/compiler/xla/service/hlo_instructions.cc | 178
-rw-r--r-- tensorflow/compiler/xla/service/hlo_instructions.h | 205
-rw-r--r-- tensorflow/compiler/xla/service/hlo_module.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/hlo_module.h | 2
-rw-r--r-- tensorflow/compiler/xla/service/hlo_module_group_util.cc | 6
-rw-r--r-- tensorflow/compiler/xla/service/hlo_module_group_util.h | 7
-rw-r--r-- tensorflow/compiler/xla/service/hlo_module_test.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/hlo_parser.cc | 6
-rw-r--r-- tensorflow/compiler/xla/service/hlo_reachability.cc | 8
-rw-r--r-- tensorflow/compiler/xla/service/hlo_reachability.h | 11
-rw-r--r-- tensorflow/compiler/xla/service/hlo_rematerialization.cc | 4
-rw-r--r-- tensorflow/compiler/xla/service/hlo_runner.cc | 21
-rw-r--r-- tensorflow/compiler/xla/service/hlo_runner.h | 12
-rw-r--r-- tensorflow/compiler/xla/service/hlo_scheduling_test.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/hlo_sharding.cc | 12
-rw-r--r-- tensorflow/compiler/xla/service/hlo_sharding.h | 4
-rw-r--r-- tensorflow/compiler/xla/service/hlo_sharding_metadata.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/hlo_sharding_test.cc | 4
-rw-r--r-- tensorflow/compiler/xla/service/hlo_value.cc | 7
-rw-r--r-- tensorflow/compiler/xla/service/hlo_value.h | 10
-rw-r--r-- tensorflow/compiler/xla/service/hlo_verifier.cc | 3
-rw-r--r-- tensorflow/compiler/xla/service/indexed_array_analysis.cc | 36
-rw-r--r-- tensorflow/compiler/xla/service/indexed_array_analysis.h | 12
-rw-r--r-- tensorflow/compiler/xla/service/instruction_fusion.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/instruction_fusion.h | 2
-rw-r--r-- tensorflow/compiler/xla/service/interpreter/executable.cc | 4
-rw-r--r-- tensorflow/compiler/xla/service/interpreter/executable.h | 4
-rw-r--r-- tensorflow/compiler/xla/service/interpreter/executor.h | 2
-rw-r--r-- tensorflow/compiler/xla/service/llvm_ir/dynamic_update_slice_util.cc | 16
-rw-r--r-- tensorflow/compiler/xla/service/llvm_ir/dynamic_update_slice_util.h | 13
-rw-r--r-- tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.h | 4
-rw-r--r-- tensorflow/compiler/xla/service/llvm_ir/ir_array.cc | 20
-rw-r--r-- tensorflow/compiler/xla/service/llvm_ir/ir_array.h | 31
-rw-r--r-- tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.h | 2
-rw-r--r-- tensorflow/compiler/xla/service/llvm_ir/kernel_tiling.cc | 5
-rw-r--r-- tensorflow/compiler/xla/service/llvm_ir/kernel_tiling.h | 4
-rw-r--r-- tensorflow/compiler/xla/service/llvm_ir/llvm_loop.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/llvm_ir/llvm_loop.h | 2
-rw-r--r-- tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc | 9
-rw-r--r-- tensorflow/compiler/xla/service/llvm_ir/llvm_util.h | 11
-rw-r--r-- tensorflow/compiler/xla/service/llvm_ir/loop_emitter.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/llvm_ir/loop_emitter.h | 3
-rw-r--r-- tensorflow/compiler/xla/service/llvm_ir/tuple_ops.cc | 3
-rw-r--r-- tensorflow/compiler/xla/service/llvm_ir/tuple_ops.h | 3
-rw-r--r-- tensorflow/compiler/xla/service/local_service.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/local_service.h | 2
-rw-r--r-- tensorflow/compiler/xla/service/multi_output_fusion.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/multi_output_fusion.h | 2
-rw-r--r-- tensorflow/compiler/xla/service/scatter_expander.cc | 3
-rw-r--r-- tensorflow/compiler/xla/service/service.cc | 33
-rw-r--r-- tensorflow/compiler/xla/service/service.h | 24
-rw-r--r-- tensorflow/compiler/xla/service/shape_inference.cc | 89
-rw-r--r-- tensorflow/compiler/xla/service/shape_inference.h | 64
-rw-r--r-- tensorflow/compiler/xla/service/shape_inference_test.cc | 9
-rw-r--r-- tensorflow/compiler/xla/service/transfer_manager.h | 5
-rw-r--r-- tensorflow/compiler/xla/service/tuple_points_to_analysis.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/tuple_points_to_analysis_test.cc | 12
-rw-r--r-- tensorflow/compiler/xla/service/tuple_util.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/tuple_util.h | 2
-rw-r--r-- tensorflow/compiler/xla/service/while_loop_analysis.cc | 3
-rw-r--r-- tensorflow/compiler/xla/service/while_util.cc | 2
-rw-r--r-- tensorflow/compiler/xla/service/while_util.h | 2
-rw-r--r-- tensorflow/compiler/xla/shape_util.cc | 31
-rw-r--r-- tensorflow/compiler/xla/shape_util.h | 72
-rw-r--r-- tensorflow/compiler/xla/shape_util_test.cc | 14
-rw-r--r-- tensorflow/compiler/xla/sparse_index_array.cc | 19
-rw-r--r-- tensorflow/compiler/xla/sparse_index_array.h | 19
-rw-r--r-- tensorflow/compiler/xla/tests/array_elementwise_ops_test.cc | 6
-rw-r--r-- tensorflow/compiler/xla/tests/broadcast_simple_test.cc | 20
-rw-r--r-- tensorflow/compiler/xla/tests/client_library_test_base.cc | 49
-rw-r--r-- tensorflow/compiler/xla/tests/client_library_test_base.h | 114
-rw-r--r-- tensorflow/compiler/xla/tests/compilation_cache_test.cc | 9
-rw-r--r-- tensorflow/compiler/xla/tests/copy_test.cc | 8
-rw-r--r-- tensorflow/compiler/xla/tests/deallocation_test.cc | 2
-rw-r--r-- tensorflow/compiler/xla/tests/deconstruct_tuple_test.cc | 2
-rw-r--r-- tensorflow/compiler/xla/tests/dynamic_ops_test.cc | 14
-rw-r--r-- tensorflow/compiler/xla/tests/floor_ceil_test.cc | 4
-rw-r--r-- tensorflow/compiler/xla/tests/fusion_test.cc | 9
-rw-r--r-- tensorflow/compiler/xla/tests/gather_operation_test.cc | 3
-rw-r--r-- tensorflow/compiler/xla/tests/half_test.cc | 5
-rw-r--r-- tensorflow/compiler/xla/tests/hlo_test_base.cc | 19
-rw-r--r-- tensorflow/compiler/xla/tests/hlo_test_base.h | 15
-rw-r--r-- tensorflow/compiler/xla/tests/literal_test_util.h | 8
-rw-r--r-- tensorflow/compiler/xla/tests/local_client_test_base.cc | 8
-rw-r--r-- tensorflow/compiler/xla/tests/local_client_test_base.h | 8
-rw-r--r-- tensorflow/compiler/xla/tests/multioutput_fusion_test.cc | 1
-rw-r--r-- tensorflow/compiler/xla/tests/pred_test.cc | 2
-rw-r--r-- tensorflow/compiler/xla/tests/prng_test.cc | 16
-rw-r--r-- tensorflow/compiler/xla/tests/reduce_test.cc | 15
-rw-r--r-- tensorflow/compiler/xla/tests/reduce_window_test.cc | 16
-rw-r--r-- tensorflow/compiler/xla/tests/reshape_test.cc | 44
-rw-r--r-- tensorflow/compiler/xla/tests/reverse_test.cc | 25
-rw-r--r-- tensorflow/compiler/xla/tests/round_trip_packed_literal_test.cc | 9
-rw-r--r-- tensorflow/compiler/xla/tests/scalar_computations_test.cc | 10
-rw-r--r-- tensorflow/compiler/xla/tests/scatter_test.cc | 3
-rw-r--r-- tensorflow/compiler/xla/tests/select_and_scatter_test.cc | 4
-rw-r--r-- tensorflow/compiler/xla/tests/slice_test.cc | 2
-rw-r--r-- tensorflow/compiler/xla/tests/test_utils.cc | 10
-rw-r--r-- tensorflow/compiler/xla/tests/tuple_test.cc | 2
-rw-r--r-- tensorflow/compiler/xla/tests/xla_hlo_profile_test.cc | 2
-rw-r--r-- tensorflow/compiler/xla/text_literal_writer.cc | 3
-rw-r--r-- tensorflow/compiler/xla/tools/dumped_computation_to_graphviz.cc | 4
-rw-r--r-- tensorflow/compiler/xla/tools/dumped_computation_to_operation_list.cc | 4
-rw-r--r-- tensorflow/compiler/xla/tools/dumped_computation_to_text.cc | 4
-rw-r--r-- tensorflow/compiler/xla/tools/dumped_computation_to_tf_graphdef.cc | 4
-rw-r--r-- tensorflow/compiler/xla/tools/replay_computation.cc | 4
-rw-r--r-- tensorflow/compiler/xla/tools/show_signature.cc | 4
-rw-r--r-- tensorflow/compiler/xla/util.cc | 19
-rw-r--r-- tensorflow/compiler/xla/util.h | 105
-rw-r--r-- tensorflow/compiler/xla/window_util.cc | 4
-rw-r--r-- tensorflow/compiler/xla/window_util.h | 4
254 files changed, 2330 insertions, 2712 deletions
diff --git a/tensorflow/compiler/aot/embedded_protocol_buffers.cc b/tensorflow/compiler/aot/embedded_protocol_buffers.cc
index 1401aae758..f1e8e5c084 100644
--- a/tensorflow/compiler/aot/embedded_protocol_buffers.cc
+++ b/tensorflow/compiler/aot/embedded_protocol_buffers.cc
@@ -111,7 +111,7 @@ GetTargetMachineFromTriple(StringPiece target_triple) {
StatusOr<EmbeddedProtocolBuffers> CreateEmbeddedProtocolBuffers(
StringPiece target_triple,
- gtl::ArraySlice<ProtobufToEmbed> protobufs_to_embed) {
+ absl::Span<const ProtobufToEmbed> protobufs_to_embed) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<llvm::TargetMachine> target_machine,
GetTargetMachineFromTriple(target_triple));
diff --git a/tensorflow/compiler/aot/embedded_protocol_buffers.h b/tensorflow/compiler/aot/embedded_protocol_buffers.h
index 4e194a6aba..1dbdfa33e4 100644
--- a/tensorflow/compiler/aot/embedded_protocol_buffers.h
+++ b/tensorflow/compiler/aot/embedded_protocol_buffers.h
@@ -84,7 +84,7 @@ struct ProtobufToEmbed {
// EmbeddedProtocolBuffers instance.
StatusOr<EmbeddedProtocolBuffers> CreateEmbeddedProtocolBuffers(
StringPiece target_triple,
- gtl::ArraySlice<ProtobufToEmbed> protobufs_to_embed);
+ absl::Span<const ProtobufToEmbed> protobufs_to_embed);
} // namespace tfcompile
} // namespace tensorflow
diff --git a/tensorflow/compiler/jit/deadness_analysis.cc b/tensorflow/compiler/jit/deadness_analysis.cc
index fe28502f69..82aa03810b 100644
--- a/tensorflow/compiler/jit/deadness_analysis.cc
+++ b/tensorflow/compiler/jit/deadness_analysis.cc
@@ -108,7 +108,7 @@ class Predicate {
virtual string ToString() const = 0;
int64 hash() const { return hash_; }
- virtual gtl::ArraySlice<Predicate*> GetOperands() const = 0;
+ virtual absl::Span<Predicate* const> GetOperands() const = 0;
virtual Kind kind() const = 0;
virtual ~Predicate() {}
@@ -129,7 +129,7 @@ class Predicate {
};
int64 HashPredicateSequence(Predicate::Kind kind,
- gtl::ArraySlice<Predicate*> preds) {
+ absl::Span<Predicate* const> preds) {
int64 hash = ::tensorflow::hash<Predicate::Kind>()(kind);
for (Predicate* pred : preds) {
hash = Hash64Combine(hash, pred->hash());
@@ -159,8 +159,10 @@ class AndPredicate : public Predicate {
Kind kind() const override { return Kind::kAnd; }
- gtl::ArraySlice<Predicate*> GetOperands() const override { return operands_; }
- gtl::ArraySlice<Predicate*> operands() const { return operands_; }
+ absl::Span<Predicate* const> GetOperands() const override {
+ return operands_;
+ }
+ absl::Span<Predicate* const> operands() const { return operands_; }
private:
std::vector<Predicate*> operands_;
@@ -187,8 +189,10 @@ class OrPredicate : public Predicate {
}
Kind kind() const override { return Kind::kOr; }
- gtl::ArraySlice<Predicate*> GetOperands() const override { return operands_; }
- gtl::ArraySlice<Predicate*> operands() const { return operands_; }
+ absl::Span<Predicate* const> GetOperands() const override {
+ return operands_;
+ }
+ absl::Span<Predicate* const> operands() const { return operands_; }
private:
std::vector<Predicate*> operands_;
@@ -207,7 +211,9 @@ class NotPredicate : public Predicate {
Kind kind() const override { return Kind::kNot; }
Predicate* operand() const { return operands_[0]; }
- gtl::ArraySlice<Predicate*> GetOperands() const override { return operands_; }
+ absl::Span<Predicate* const> GetOperands() const override {
+ return operands_;
+ }
private:
std::array<Predicate*, 1> operands_;
@@ -240,7 +246,9 @@ class AndRecurrencePredicate : public Predicate {
Kind kind() const override { return Kind::kAndRecurrence; }
- gtl::ArraySlice<Predicate*> GetOperands() const override { return operands_; }
+ absl::Span<Predicate* const> GetOperands() const override {
+ return operands_;
+ }
private:
std::array<Predicate*, 2> operands_;
@@ -264,7 +272,7 @@ class SymbolPredicate : public Predicate {
}
Kind kind() const override { return Kind::kSymbol; }
- gtl::ArraySlice<Predicate*> GetOperands() const override { return {}; }
+ absl::Span<Predicate* const> GetOperands() const override { return {}; }
// If `must_be_true()` is true this SymbolPredicate represents the proposition
// "tensor_id() is live and evaluates to true".
@@ -313,11 +321,11 @@ template <typename FunctionTy>
// them.
class PredicateFactory {
public:
- Predicate* MakeAndPredicate(gtl::ArraySlice<Predicate*> operands) {
+ Predicate* MakeAndPredicate(absl::Span<Predicate* const> operands) {
return MakeAndOrImpl(operands, /*is_and=*/true);
}
- Predicate* MakeOrPredicate(gtl::ArraySlice<Predicate*> operands) {
+ Predicate* MakeOrPredicate(absl::Span<Predicate* const> operands) {
return MakeAndOrImpl(operands, /*is_and=*/false);
}
@@ -374,7 +382,7 @@ class PredicateFactory {
new PredicateT(std::forward<Args>(args)...));
}
- Predicate* MakeAndOrImpl(gtl::ArraySlice<Predicate*> operands, bool is_and);
+ Predicate* MakeAndOrImpl(absl::Span<Predicate* const> operands, bool is_and);
// Predicate instances are interned, meaning that there is only a single
// instance of a Predicate object with a given content. This makes checking
@@ -387,7 +395,7 @@ class PredicateFactory {
// for the owning pointers to predicate instances.
using SignatureForAndOr =
- std::pair<Predicate::Kind, gtl::ArraySlice<Predicate*>>;
+ std::pair<Predicate::Kind, absl::Span<Predicate* const>>;
using SignatureForNot = Predicate*;
using SignatureForAndRec = std::pair<Predicate*, Predicate*>;
using SignatureForSymbol = std::pair<SafeTensorId, bool>;
@@ -422,8 +430,8 @@ class PredicateFactory {
};
// Common code to create AndPredicate or OrPredicate instances.
-Predicate* PredicateFactory::MakeAndOrImpl(gtl::ArraySlice<Predicate*> operands,
- bool is_and) {
+Predicate* PredicateFactory::MakeAndOrImpl(
+ absl::Span<Predicate* const> operands, bool is_and) {
Predicate::Kind pred_kind =
is_and ? Predicate::Kind::kAnd : Predicate::Kind::kOr;
gtl::FlatSet<Predicate*> simplified_ops_set;
@@ -474,7 +482,7 @@ Predicate* PredicateFactory::MakeAndOrImpl(gtl::ArraySlice<Predicate*> operands,
// NB! Because we'll use a non-owning reference to simplified_ops in the
// key for interned_and_or_instances_ we need to be careful to std::move()
// it all the way through.
- gtl::ArraySlice<Predicate*> operands_slice = simplified_ops;
+ absl::Span<Predicate* const> operands_slice = simplified_ops;
std::unique_ptr<Predicate> new_pred =
is_and ? Make<AndPredicate>(std::move(simplified_ops))
: Make<OrPredicate>(std::move(simplified_ops));
@@ -496,7 +504,7 @@ class DeadnessAnalysisImpl : public DeadnessAnalysis {
: graph_(*graph), vlog_(VLOG_IS_ON(2)) {}
Status Populate();
- Status PopulateWithReversePostOrder(gtl::ArraySlice<Node*> rpo);
+ Status PopulateWithReversePostOrder(absl::Span<Node* const> rpo);
bool HasInputsWithMismatchingDeadness(const Node& node) override;
void Print() const override;
gtl::FlatMap<TensorId, string, TensorId::Hasher> PredicateMapAsString() const;
@@ -527,7 +535,7 @@ class DeadnessAnalysisImpl : public DeadnessAnalysis {
}
}
- void SetPredicate(Node* n, gtl::ArraySlice<int> output_idxs, Predicate* pred,
+ void SetPredicate(Node* n, absl::Span<const int> output_idxs, Predicate* pred,
std::vector<bool>* should_revisit) {
for (int output_idx : output_idxs) {
SetPredicate(n, output_idx, pred, should_revisit);
@@ -625,7 +633,7 @@ Predicate* DeduceStepPredicate(PredicateFactory* predicate_factory,
}
std::vector<Predicate*> and_ops;
- gtl::ArraySlice<Predicate*> recurrent_pred_ops =
+ absl::Span<Predicate* const> recurrent_pred_ops =
backedge_predicate->GetOperands();
bool found_sym = false;
@@ -784,7 +792,7 @@ Status DeadnessAnalysisImpl::Populate() {
}
Status DeadnessAnalysisImpl::PopulateWithReversePostOrder(
- gtl::ArraySlice<Node*> rpo) {
+ absl::Span<Node* const> rpo) {
// This an abstract interpretation over the deadness propagation semantics of
// the graph executor.
//
@@ -924,7 +932,7 @@ Status ComputePredicates(const Graph& graph,
}
Status ComputePredicates(const Graph& graph,
- gtl::ArraySlice<Node*> reverse_post_order,
+ absl::Span<Node* const> reverse_post_order,
PredicateMapTy* out_predicate_map) {
DeadnessAnalysisImpl impl(&graph);
TF_RETURN_IF_ERROR(impl.PopulateWithReversePostOrder(reverse_post_order));
diff --git a/tensorflow/compiler/jit/deadness_analysis_internal.h b/tensorflow/compiler/jit/deadness_analysis_internal.h
index 401d6e406a..3df2679c62 100644
--- a/tensorflow/compiler/jit/deadness_analysis_internal.h
+++ b/tensorflow/compiler/jit/deadness_analysis_internal.h
@@ -32,7 +32,7 @@ Status ComputePredicates(const Graph& graph, PredicateMapTy* out_predicate_map);
// specified in `reverse_post_order` which must be a valid RPO for the graph
// minus NextIteration->Merge edges.
Status ComputePredicates(const Graph& graph,
- gtl::ArraySlice<Node*> reverse_post_order,
+ absl::Span<Node* const> reverse_post_order,
PredicateMapTy* out_predicate_map);
} // namespace deadness_analysis_internal
} // namespace tensorflow
diff --git a/tensorflow/compiler/jit/encapsulate_subgraphs_pass_test.cc b/tensorflow/compiler/jit/encapsulate_subgraphs_pass_test.cc
index b3600fc48b..7bc0ef0303 100644
--- a/tensorflow/compiler/jit/encapsulate_subgraphs_pass_test.cc
+++ b/tensorflow/compiler/jit/encapsulate_subgraphs_pass_test.cc
@@ -379,7 +379,7 @@ Node* InputShaped(const GraphDefBuilder::Options& opts) {
return ops::SourceOp("InputTestShaped", opts);
}
-Node* KnownShapeBase(DataType dtype, const gtl::ArraySlice<int>& shape,
+Node* KnownShapeBase(DataType dtype, absl::Span<const int> shape,
const GraphDefBuilder::Options& opts) {
if (opts.HaveError()) return nullptr;
NodeBuilder node_builder(opts.GetNameForOp("Const"), "Const",
@@ -394,7 +394,7 @@ Node* KnownShapeBase(DataType dtype, const gtl::ArraySlice<int>& shape,
.FinalizeBuilder(&node_builder);
}
-Node* KnownShape(const gtl::ArraySlice<int>& shape,
+Node* KnownShape(absl::Span<const int> shape,
const GraphDefBuilder::Options& opts) {
return KnownShapeBase(DT_FLOAT, shape, opts);
}
@@ -417,8 +417,7 @@ Node* KeyPlaceholder(const string& call_node,
}
Node* RecvAtHost(ops::NodeOut key_input, const string& cluster,
- const string& oc_cluster,
- const gtl::ArraySlice<DataType>& dtypes,
+ const string& oc_cluster, absl::Span<const DataType> dtypes,
const GraphDefBuilder::Options& opts) {
if (opts.HaveError()) return nullptr;
string key =
@@ -892,13 +891,13 @@ TEST(EncapsulateSubgraphsTest, OneFunctionOneOutside) {
{{"outside_compilation_O1_host_compute"},
"XlaHostCompute",
{"C:o:0", "c:o:0"},
- {{"Tinputs", gtl::ArraySlice<DataType>({DT_FLOAT, DT_FLOAT})},
- {"Toutputs", gtl::ArraySlice<DataType>({DT_FLOAT})},
- {"ancestors", gtl::ArraySlice<string>({})},
+ {{"Tinputs", absl::Span<const DataType>({DT_FLOAT, DT_FLOAT})},
+ {"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
+ {"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F1_O1"},
{"shape_inference_graph",
"_outside_compilation_shape_inference_F1_O1"},
- {"shapes", gtl::ArraySlice<DataType>({})},
+ {"shapes", absl::Span<const DataType>({})},
{"_outside_compilation_subgraph", "O1"}},
{"c"}},
},
@@ -1038,26 +1037,26 @@ TEST(EncapsulateSubgraphsTest, OneFunctionTwoOutside) {
{{"outside_compilation_O2_host_compute"},
"XlaHostCompute",
{"F:o:0", "D:o:0"},
- {{"Tinputs", gtl::ArraySlice<DataType>({DT_FLOAT, DT_FLOAT})},
- {"Toutputs", gtl::ArraySlice<DataType>({DT_FLOAT})},
+ {{"Tinputs", absl::Span<const DataType>({DT_FLOAT, DT_FLOAT})},
+ {"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
{"ancestors",
- gtl::ArraySlice<string>({"outside_compilation_O1_host_compute"})},
+ absl::Span<const string>({"outside_compilation_O1_host_compute"})},
{"key", "host_compute_channel_F1_O2"},
{"shape_inference_graph",
"_outside_compilation_shape_inference_F1_O2"},
- {"shapes", gtl::ArraySlice<DataType>({})},
+ {"shapes", absl::Span<const DataType>({})},
{"_outside_compilation_subgraph", "O2"}},
{"F", "outside_compilation_O1_host_compute"}},
{{"outside_compilation_O1_host_compute"},
"XlaHostCompute",
{"C:o:0", "D:o:0"},
- {{"Tinputs", gtl::ArraySlice<DataType>({DT_FLOAT, DT_FLOAT})},
- {"Toutputs", gtl::ArraySlice<DataType>({DT_FLOAT})},
- {"ancestors", gtl::ArraySlice<string>({})},
+ {{"Tinputs", absl::Span<const DataType>({DT_FLOAT, DT_FLOAT})},
+ {"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
+ {"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F1_O1"},
{"shape_inference_graph",
"_outside_compilation_shape_inference_F1_O1"},
- {"shapes", gtl::ArraySlice<DataType>({})},
+ {"shapes", absl::Span<const DataType>({})},
{"_outside_compilation_subgraph", "O1"}},
{"D"}},
},
@@ -1190,13 +1189,13 @@ TEST(EncapsulateSubgraphsTest, TwoFunctionsTwoOutside) {
{{"outside_compilation_O1_host_compute"},
"XlaHostCompute",
{"C:o:0", "D:o:0"},
- {{"Tinputs", gtl::ArraySlice<DataType>({DT_FLOAT, DT_FLOAT})},
- {"Toutputs", gtl::ArraySlice<DataType>({DT_FLOAT})},
- {"ancestors", gtl::ArraySlice<string>({})},
+ {{"Tinputs", absl::Span<const DataType>({DT_FLOAT, DT_FLOAT})},
+ {"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
+ {"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F1_O1"},
{"shape_inference_graph",
"_outside_compilation_shape_inference_F1_O1"},
- {"shapes", gtl::ArraySlice<DataType>({})},
+ {"shapes", absl::Span<const DataType>({})},
{"_outside_compilation_subgraph", "O1"}},
{"D"}},
},
@@ -1213,13 +1212,13 @@ TEST(EncapsulateSubgraphsTest, TwoFunctionsTwoOutside) {
{{"outside_compilation_O1_host_compute"},
"XlaHostCompute",
{"G:o:0"},
- {{"Tinputs", gtl::ArraySlice<DataType>({DT_FLOAT})},
- {"Toutputs", gtl::ArraySlice<DataType>({DT_FLOAT})},
- {"ancestors", gtl::ArraySlice<string>({})},
+ {{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
+ {"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
+ {"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F2_O1"},
{"shape_inference_graph", ""},
{"shapes",
- gtl::ArraySlice<TensorShapeProto>({shape_proto_expected})},
+ absl::Span<const TensorShapeProto>({shape_proto_expected})},
{"_outside_compilation_subgraph", "O1"}}},
},
{{"g_0_retval", "G:o:0"}, {"i_0_retval", "I:o:0"}});
@@ -1364,13 +1363,13 @@ TEST(EncapsulateSubgraphsTest, TwoFunctionsTwoOutsideDependencyFromOutside) {
{{"outside_compilation_O1_host_compute"},
"XlaHostCompute",
{"C:o:0", "D:o:0"},
- {{"Tinputs", gtl::ArraySlice<DataType>({DT_FLOAT, DT_FLOAT})},
- {"Toutputs", gtl::ArraySlice<DataType>({DT_FLOAT})},
- {"ancestors", gtl::ArraySlice<string>({})},
+ {{"Tinputs", absl::Span<const DataType>({DT_FLOAT, DT_FLOAT})},
+ {"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
+ {"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F1_O1"},
{"shape_inference_graph",
"_outside_compilation_shape_inference_F1_O1"},
- {"shapes", gtl::ArraySlice<TensorShapeProto>({})},
+ {"shapes", absl::Span<const TensorShapeProto>({})},
{"_outside_compilation_subgraph", "O1"}},
{"D"}},
},
@@ -1386,13 +1385,13 @@ TEST(EncapsulateSubgraphsTest, TwoFunctionsTwoOutsideDependencyFromOutside) {
{{"outside_compilation_O1_host_compute"},
"XlaHostCompute",
{"G:o:0"},
- {{"Tinputs", gtl::ArraySlice<DataType>({DT_FLOAT})},
- {"Toutputs", gtl::ArraySlice<DataType>({DT_FLOAT})},
- {"ancestors", gtl::ArraySlice<string>({})},
+ {{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
+ {"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
+ {"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F2_O1"},
{"shape_inference_graph",
"_outside_compilation_shape_inference_F2_O1"},
- {"shapes", gtl::ArraySlice<TensorShapeProto>({})},
+ {"shapes", absl::Span<const TensorShapeProto>({})},
{"_outside_compilation_subgraph", "O1"}}},
},
{{"i_0_retval", "I:o:0"}});
@@ -1495,13 +1494,13 @@ TEST(EncapsulateSubgraphsTest, OutsideCompilationNoInputs) {
{{"outside_compilation_O1_host_compute"},
"XlaHostCompute",
{},
- {{"Tinputs", gtl::ArraySlice<DataType>({})},
- {"Toutputs", gtl::ArraySlice<DataType>({DT_FLOAT})},
- {"ancestors", gtl::ArraySlice<string>({})},
+ {{"Tinputs", absl::Span<const DataType>({})},
+ {"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
+ {"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F1_O1"},
{"shape_inference_graph", ""},
{"shapes",
- gtl::ArraySlice<TensorShapeProto>({shape_proto_expected})},
+ absl::Span<const TensorShapeProto>({shape_proto_expected})},
{"_outside_compilation_subgraph", "O1"}}},
},
{{"f_0_retval", "F:o:0"}});
@@ -1579,13 +1578,13 @@ TEST(EncapsulateSubgraphsTest, OutsideCompilationControlInput) {
{{"outside_compilation_O1_host_compute"},
"XlaHostCompute",
{},
- {{"Tinputs", gtl::ArraySlice<DataType>({})},
- {"Toutputs", gtl::ArraySlice<DataType>({DT_FLOAT})},
- {"ancestors", gtl::ArraySlice<string>({})},
+ {{"Tinputs", absl::Span<const DataType>({})},
+ {"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
+ {"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F1_O1"},
{"shape_inference_graph", ""},
{"shapes",
- gtl::ArraySlice<TensorShapeProto>({shape_proto_expected})},
+ absl::Span<const TensorShapeProto>({shape_proto_expected})},
{"_outside_compilation_subgraph", "O1"}},
{"D"}},
},
@@ -1661,12 +1660,12 @@ TEST(EncapsulateSubgraphsTest, OutsideCompilationNoOutputs) {
{{"outside_compilation_O1_host_compute"},
"XlaHostCompute",
{"D:o:0"},
- {{"Tinputs", gtl::ArraySlice<DataType>({DT_FLOAT})},
- {"Toutputs", gtl::ArraySlice<DataType>({})},
- {"ancestors", gtl::ArraySlice<string>({})},
+ {{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
+ {"Toutputs", absl::Span<const DataType>({})},
+ {"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F1_O1"},
{"shape_inference_graph", ""},
- {"shapes", gtl::ArraySlice<TensorShapeProto>({})},
+ {"shapes", absl::Span<const TensorShapeProto>({})},
{"_outside_compilation_subgraph", "O1"}}},
},
{{"f_0_retval", "F:o:0"}});
@@ -1742,12 +1741,12 @@ TEST(EncapsulateSubgraphsTest, OutsideCompilationControlOutput) {
{{"outside_compilation_O1_host_compute"},
"XlaHostCompute",
{"D:o:0"},
- {{"Tinputs", gtl::ArraySlice<DataType>({DT_FLOAT})},
- {"Toutputs", gtl::ArraySlice<DataType>({})},
- {"ancestors", gtl::ArraySlice<string>({})},
+ {{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
+ {"Toutputs", absl::Span<const DataType>({})},
+ {"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F1_O1"},
{"shape_inference_graph", ""},
- {"shapes", gtl::ArraySlice<TensorShapeProto>({})},
+ {"shapes", absl::Span<const TensorShapeProto>({})},
{"_outside_compilation_subgraph", "O1"}}},
},
{{"f_0_retval", "F:o:0"}});
@@ -1846,13 +1845,13 @@ TEST(EncapsulateSubgraphsTest,
{{"outside_compilation_O2_host_compute"},
"XlaHostCompute",
{"F:o:0"},
- {{"Tinputs", gtl::ArraySlice<DataType>({DT_FLOAT})},
- {"Toutputs", gtl::ArraySlice<DataType>({DT_FLOAT})},
- {"ancestors", gtl::ArraySlice<string>({})},
+ {{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
+ {"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
+ {"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F1_O2"},
{"shape_inference_graph",
"_outside_compilation_shape_inference_F1_O2"},
- {"shapes", gtl::ArraySlice<TensorShapeProto>({})},
+ {"shapes", absl::Span<const TensorShapeProto>({})},
{"_outside_compilation_subgraph", "O2"}}},
},
{{"h_0_retval", "H:o:0"}});
@@ -1955,13 +1954,13 @@ TEST(EncapsulateSubgraphsTest,
{{"outside_compilation_O1_host_compute"},
"XlaHostCompute",
{"D:o:0"},
- {{"Tinputs", gtl::ArraySlice<DataType>({DT_FLOAT})},
- {"Toutputs", gtl::ArraySlice<DataType>({DT_FLOAT})},
- {"ancestors", gtl::ArraySlice<string>({})},
+ {{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
+ {"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
+ {"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F1_O1"},
{"shape_inference_graph",
"_outside_compilation_shape_inference_F1_O1"},
- {"shapes", gtl::ArraySlice<TensorShapeProto>({})},
+ {"shapes", absl::Span<const TensorShapeProto>({})},
{"_outside_compilation_subgraph", "O1"}}},
},
{{"h_0_retval", "H:o:0"}});
@@ -2066,37 +2065,37 @@ TEST(EncapsulateSubgraphsTest, OutsideCompilationClusterDependency) {
{{"outside_compilation_O1_host_compute"},
"XlaHostCompute",
{"D:o:0"},
- {{"Tinputs", gtl::ArraySlice<DataType>({DT_FLOAT})},
- {"Toutputs", gtl::ArraySlice<DataType>({DT_FLOAT})},
- {"ancestors", gtl::ArraySlice<string>({})},
+ {{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
+ {"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
+ {"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F1_O1"},
{"shape_inference_graph",
"_outside_compilation_shape_inference_F1_O1"},
- {"shapes", gtl::ArraySlice<TensorShapeProto>({})},
+ {"shapes", absl::Span<const TensorShapeProto>({})},
{"_outside_compilation_subgraph", "O1"}}},
{{"outside_compilation_O2_host_compute"},
"XlaHostCompute",
{"D:o:0"},
- {{"Tinputs", gtl::ArraySlice<DataType>({DT_FLOAT})},
- {"Toutputs", gtl::ArraySlice<DataType>({})},
+ {{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
+ {"Toutputs", absl::Span<const DataType>({})},
{"ancestors",
- gtl::ArraySlice<string>({"outside_compilation_O1_host_compute"})},
+ absl::Span<const string>({"outside_compilation_O1_host_compute"})},
{"key", "host_compute_channel_F1_O2"},
{"shape_inference_graph", ""},
- {"shapes", gtl::ArraySlice<TensorShapeProto>({})},
+ {"shapes", absl::Span<const TensorShapeProto>({})},
{"_outside_compilation_subgraph", "O2"}},
{"outside_compilation_O1_host_compute"}},
{{"outside_compilation_O3_host_compute"},
"XlaHostCompute",
{"D:o:0"},
- {{"Tinputs", gtl::ArraySlice<DataType>({DT_FLOAT})},
- {"Toutputs", gtl::ArraySlice<DataType>({})},
+ {{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
+ {"Toutputs", absl::Span<const DataType>({})},
{"ancestors",
- gtl::ArraySlice<string>({"outside_compilation_O1_host_compute",
- "outside_compilation_O2_host_compute"})},
+ absl::Span<const string>({"outside_compilation_O1_host_compute",
+ "outside_compilation_O2_host_compute"})},
{"key", "host_compute_channel_F1_O3"},
{"shape_inference_graph", ""},
- {"shapes", gtl::ArraySlice<TensorShapeProto>({})},
+ {"shapes", absl::Span<const TensorShapeProto>({})},
{"_outside_compilation_subgraph", "O3"}},
{"outside_compilation_O1_host_compute",
"outside_compilation_O2_host_compute"}}},
@@ -2272,13 +2271,13 @@ TEST(EncapsulateSubgraphsTest, OutsideCompilationShapeInference) {
{{"outside_compilation_O1_host_compute"},
"XlaHostCompute",
{"c:o:0"},
- {{"Tinputs", gtl::ArraySlice<DataType>({DT_FLOAT})},
- {"Toutputs", gtl::ArraySlice<DataType>({DT_FLOAT})},
- {"ancestors", gtl::ArraySlice<string>({})},
+ {{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
+ {"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
+ {"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F1_O1"},
{"shape_inference_graph",
"_outside_compilation_shape_inference_F1_O1"},
- {"shapes", gtl::ArraySlice<DataType>({})},
+ {"shapes", absl::Span<const DataType>({})},
{"_outside_compilation_subgraph", "O1"}},
{"c"}},
},
diff --git a/tensorflow/compiler/jit/partially_decluster_pass.cc b/tensorflow/compiler/jit/partially_decluster_pass.cc
index 3a9a8c4988..a8f09bfa50 100644
--- a/tensorflow/compiler/jit/partially_decluster_pass.cc
+++ b/tensorflow/compiler/jit/partially_decluster_pass.cc
@@ -22,7 +22,7 @@ limitations under the License.
namespace tensorflow {
namespace {
Status FindNodesToDecluster(const Graph& graph, gtl::FlatSet<Node*>* result,
- gtl::ArraySlice<Node*> post_order) {
+ absl::Span<Node* const> post_order) {
// Find nodes that have at least one user outside their cluster that expects
// hostmem output. These nodes should be cloned to outside the cluster to
// avoid the device-host copy we'd otherwise need.
diff --git a/tensorflow/compiler/tests/randomized_tests.cc b/tensorflow/compiler/tests/randomized_tests.cc
index 1b8198dba8..0faf0fd8ed 100644
--- a/tensorflow/compiler/tests/randomized_tests.cc
+++ b/tensorflow/compiler/tests/randomized_tests.cc
@@ -275,13 +275,13 @@ class OpTest : public ::testing::Test {
// Select a random element from 'candidates'.
template <typename T>
- T Choose(gtl::ArraySlice<T> candidates);
+ T Choose(absl::Span<const T> candidates);
static constexpr int kDefaultMaxRank = 5;
static constexpr int64 kDefaultMaxDimensionSize = 256LL;
// Returns true if 'dims' have a size less than tf_xla_max_tensor_size.
- bool TensorSizeIsOk(gtl::ArraySlice<int64> dims);
+ bool TensorSizeIsOk(absl::Span<const int64> dims);
// Returns a random dimension size, in the range [min, max).
int64 RandomDim(int64 min = 0, int64 max = kDefaultMaxDimensionSize);
@@ -307,11 +307,11 @@ class OpTest : public ::testing::Test {
// of the type's range. If the shape is omitted, a random shape is used.
// TODO(phawkins): generalize this code to a caller-supplied distribution.
Tensor RandomTensor(DataType dtype, bool needs_unique_values,
- gtl::ArraySlice<int64> shape);
+ absl::Span<const int64> shape);
Tensor RandomTensor(DataType dtype);
// Like RandomTensor, but uses values >= 0.
- Tensor RandomNonNegativeTensor(DataType dtype, gtl::ArraySlice<int64> shape);
+ Tensor RandomNonNegativeTensor(DataType dtype, absl::Span<const int64> shape);
Tensor RandomNonNegativeTensor(DataType dtype);
// Returns a random subset of the integers in the range [0, rank), suitable
@@ -415,7 +415,7 @@ void OpTest::Repeatedly(const std::function<TestResult(void)>& fn) {
}
template <typename T>
-T OpTest::Choose(gtl::ArraySlice<T> candidates) {
+T OpTest::Choose(absl::Span<const T> candidates) {
std::uniform_int_distribution<size_t> d(0, candidates.size() - 1);
return candidates[d(generator())];
}
@@ -425,7 +425,7 @@ int64 OpTest::RandomDim(int64 min, int64 max) {
return size_distribution(generator());
}
-bool OpTest::TensorSizeIsOk(gtl::ArraySlice<int64> dims) {
+bool OpTest::TensorSizeIsOk(absl::Span<const int64> dims) {
int64 size = 1LL;
for (int64 dim : dims) {
size *= dim;
@@ -451,7 +451,7 @@ std::vector<int64> OpTest::RandomDims(int min_rank, int max_rank,
}
Tensor OpTest::RandomTensor(DataType dtype, bool needs_unique_values,
- gtl::ArraySlice<int64> shape) {
+ absl::Span<const int64> shape) {
Tensor tensor(dtype, TensorShape(shape));
switch (dtype) {
case DT_FLOAT: {
@@ -548,7 +548,7 @@ Tensor OpTest::RandomTensor(DataType dtype) {
}
Tensor OpTest::RandomNonNegativeTensor(DataType dtype,
- gtl::ArraySlice<int64> shape) {
+ absl::Span<const int64> shape) {
Tensor tensor(dtype, TensorShape(shape));
switch (dtype) {
case DT_FLOAT: {
@@ -1884,7 +1884,7 @@ TEST_F(OpTest, DynamicStitch) {
for (int i = 0; i < n; ++i) {
TensorShape shape(index_dims[i]);
Tensor t = test::AsTensor<int32>(
- gtl::ArraySlice<int32>(indices).subspan(pos, shape.num_elements()),
+ absl::Span<const int32>(indices).subspan(pos, shape.num_elements()),
shape);
builder.Input(t);
pos += t.NumElements();
diff --git a/tensorflow/compiler/tf2xla/functionalize_control_flow_test.cc b/tensorflow/compiler/tf2xla/functionalize_control_flow_test.cc
index cc52057f21..c068a4110c 100644
--- a/tensorflow/compiler/tf2xla/functionalize_control_flow_test.cc
+++ b/tensorflow/compiler/tf2xla/functionalize_control_flow_test.cc
@@ -805,11 +805,11 @@ TEST(FunctionalizeControlFlow, Complex) {
auto assign = ops::AssignAddVariableOp(
scope.WithOpName("outer/inner/assign_add"), enter_var, add_jkx);
- auto one =
- ops::Const<int32>(scope.WithOpName("outer/inner/One")
- .WithControlDependencies(
- gtl::ArraySlice<Operation>{assign.operation}),
- 1);
+ auto one = ops::Const<int32>(
+ scope.WithOpName("outer/inner/One")
+ .WithControlDependencies(
+ absl::Span<const Operation>{assign.operation}),
+ 1);
auto add_j =
ops::Add(scope.WithOpName("outer/inner/add_j"), identity_j, one);
@@ -823,7 +823,7 @@ TEST(FunctionalizeControlFlow, Complex) {
scope.WithOpName("outer/add/y").WithControlDependencies(identity_i), 1);
auto add_i =
ops::Add(scope.WithOpName("outer/add")
- .WithControlDependencies(gtl::ArraySlice<Operation>{
+ .WithControlDependencies(absl::Span<const Operation>{
exit_j.output.op(), exit_k.output.op()}),
identity_i, one_outer);
auto next_iteration_i =
@@ -929,7 +929,7 @@ TEST(FunctionalizeControlFlow, Complex) {
scope.WithOpName("outer/add/y").WithControlDependencies(identity_i), 1);
auto add_i =
ops::Add(scope.WithOpName("outer/add")
- .WithControlDependencies(gtl::ArraySlice<Operation>{
+ .WithControlDependencies(absl::Span<const Operation>{
while_op[0].op(), while_op[1].op()}),
identity_i, one_outer);
@@ -991,11 +991,11 @@ TEST(FunctionalizeControlFlow, Complex) {
auto assign = ops::AssignAddVariableOp(
scope.WithOpName("outer/inner/assign_add"), arg3, add_jkx);
- auto one =
- ops::Const<int32>(scope.WithOpName("outer/inner/One")
- .WithControlDependencies(
- gtl::ArraySlice<Operation>{assign.operation}),
- 1);
+ auto one = ops::Const<int32>(
+ scope.WithOpName("outer/inner/One")
+ .WithControlDependencies(
+ absl::Span<const Operation>{assign.operation}),
+ 1);
auto add_j =
ops::Add(scope.WithOpName("outer/inner/add_j"), identity_j, one);
diff --git a/tensorflow/compiler/tf2xla/kernels/batchtospace_op.cc b/tensorflow/compiler/tf2xla/kernels/batchtospace_op.cc
index 48f2a005ab..edced6bc0e 100644
--- a/tensorflow/compiler/tf2xla/kernels/batchtospace_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/batchtospace_op.cc
@@ -23,7 +23,7 @@ namespace {
void BatchToSpace(XlaOpKernelContext* ctx, const xla::XlaOp& input,
DataType input_dtype, const TensorShape& input_tensor_shape,
- gtl::ArraySlice<int64> block_shape,
+ absl::Span<const int64> block_shape,
const xla::Literal& crops) {
const int input_rank = input_tensor_shape.dims();
const gtl::InlinedVector<int64, 4> input_shape =
@@ -34,7 +34,7 @@ void BatchToSpace(XlaOpKernelContext* ctx, const xla::XlaOp& input,
ctx, input_rank >= 1 + block_rank,
errors::InvalidArgument("input rank should be >= ", 1 + block_rank,
" instead of ", input_rank));
- gtl::ArraySlice<int64> remainder_shape(input_shape);
+ absl::Span<const int64> remainder_shape(input_shape);
remainder_shape.remove_prefix(1 + block_rank);
OP_REQUIRES(
diff --git a/tensorflow/compiler/tf2xla/kernels/binary_ops.cc b/tensorflow/compiler/tf2xla/kernels/binary_ops.cc
index 2c328102e0..df17da4c1c 100644
--- a/tensorflow/compiler/tf2xla/kernels/binary_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/binary_ops.cc
@@ -30,21 +30,21 @@ namespace {
// A subclass of a XlaBinaryOp must build the computation that
// describes the (tensor,tensor)->tensor function to apply to each element of
// the input.
-#define XLA_MAKE_BINARY(NAME, HLO) \
- class NAME##Op : public XlaBinaryOp { \
- public: \
- explicit NAME##Op(OpKernelConstruction* ctx) : XlaBinaryOp(ctx) {} \
- xla::XlaOp Computation( \
- XlaOpKernelContext* ctx, const xla::XlaOp& lhs, \
- const gtl::ArraySlice<int64>& lhs_shape, const xla::XlaOp& rhs, \
- const gtl::ArraySlice<int64>& rhs_shape, \
- const BCast& broadcast_helper, \
- const std::vector<int64>& extend_dimensions) override { \
- xla::XlaBuilder* b = ctx->builder(); \
- (void)b; \
- return HLO; \
- } \
- }; \
+#define XLA_MAKE_BINARY(NAME, HLO) \
+ class NAME##Op : public XlaBinaryOp { \
+ public: \
+ explicit NAME##Op(OpKernelConstruction* ctx) : XlaBinaryOp(ctx) {} \
+ xla::XlaOp Computation( \
+ XlaOpKernelContext* ctx, const xla::XlaOp& lhs, \
+ const absl::Span<const int64>& lhs_shape, const xla::XlaOp& rhs, \
+ const absl::Span<const int64>& rhs_shape, \
+ const BCast& broadcast_helper, \
+ const std::vector<int64>& extend_dimensions) override { \
+ xla::XlaBuilder* b = ctx->builder(); \
+ (void)b; \
+ return HLO; \
+ } \
+ }; \
REGISTER_XLA_OP(Name(#NAME), NAME##Op)
XLA_MAKE_BINARY(Add, xla::Add(lhs, rhs, extend_dimensions));
diff --git a/tensorflow/compiler/tf2xla/kernels/cwise_ops.h b/tensorflow/compiler/tf2xla/kernels/cwise_ops.h
index a5b870f8db..6653944a91 100644
--- a/tensorflow/compiler/tf2xla/kernels/cwise_ops.h
+++ b/tensorflow/compiler/tf2xla/kernels/cwise_ops.h
@@ -57,8 +57,8 @@ class XlaBinaryOp : public XlaOpKernel {
// in the XLA documentation.
virtual xla::XlaOp Computation(
XlaOpKernelContext* ctx, const xla::XlaOp& lhs,
- const gtl::ArraySlice<int64>& lhs_shape, const xla::XlaOp& rhs,
- const gtl::ArraySlice<int64>& rhs_shape, const BCast& broadcast_helper,
+ const absl::Span<const int64>& lhs_shape, const xla::XlaOp& rhs,
+ const absl::Span<const int64>& rhs_shape, const BCast& broadcast_helper,
const std::vector<int64>& extend_dimensions) = 0;
void Compile(XlaOpKernelContext* ctx) override;
diff --git a/tensorflow/compiler/tf2xla/kernels/diag_op.cc b/tensorflow/compiler/tf2xla/kernels/diag_op.cc
index 70c3eaf66b..49c12fc232 100644
--- a/tensorflow/compiler/tf2xla/kernels/diag_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/diag_op.cc
@@ -29,7 +29,7 @@ namespace {
// Create a diagonal / batch diagonal matrix with 'input' on the diagonal.
xla::XlaOp CreateDiagonal(xla::XlaOp input, int64 last_dim_size,
- gtl::ArraySlice<int64> other_dims,
+ absl::Span<const int64> other_dims,
xla::PrimitiveType element_type) {
xla::XlaBuilder* builder = input.builder();
// Create two matrices that have the following forms, and compare them:
@@ -177,7 +177,7 @@ class MatrixDiagOp : public XlaOpKernel {
int last_dim = dims.size() - 1;
int64 last_dim_size = input_shape.dim_size(last_dim);
- tensorflow::gtl::ArraySlice<int64> other_dims(dims);
+ absl::Span<const int64> other_dims(dims);
other_dims.remove_suffix(1);
xla::XlaOp input = ctx->Input(0);
diff --git a/tensorflow/compiler/tf2xla/kernels/image_resize_ops.cc b/tensorflow/compiler/tf2xla/kernels/image_resize_ops.cc
index 8e071bf0b7..d9a0257b70 100644
--- a/tensorflow/compiler/tf2xla/kernels/image_resize_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/image_resize_ops.cc
@@ -78,7 +78,7 @@ struct ResizeConvolutionDims {
std::vector<int64> stride;
};
ResizeConvolutionDims ComputeResizeConvolutionParameters(
- gtl::ArraySlice<int64> in_size, gtl::ArraySlice<int64> out_size,
+ absl::Span<const int64> in_size, absl::Span<const int64> out_size,
bool align_corners) {
CHECK_EQ(in_size.size(), out_size.size());
int num_spatial_dims = in_size.size();
@@ -147,7 +147,7 @@ std::vector<float> Make1DKernel(int64 n) {
const int64 kMax2DKernelSize = 16;
xla::XlaOp MakeBilinearResizeKernel(xla::XlaBuilder* builder,
- gtl::ArraySlice<int64> kernel_size,
+ absl::Span<const int64> kernel_size,
int64 channels) {
xla::XlaOp channels_iota = xla::Iota(builder, xla::S32, channels);
@@ -165,7 +165,7 @@ xla::XlaOp MakeBilinearResizeKernel(xla::XlaBuilder* builder,
}
xla::XlaOp MakeBilinearResizeKernelInDim(xla::XlaBuilder* builder,
- gtl::ArraySlice<int64> kernel_size,
+ absl::Span<const int64> kernel_size,
int64 channels, int64 dim) {
xla::XlaOp channels_iota = xla::Iota(builder, xla::S32, channels);
diff --git a/tensorflow/compiler/tf2xla/kernels/random_ops.cc b/tensorflow/compiler/tf2xla/kernels/random_ops.cc
index 2da9340625..afd5986846 100644
--- a/tensorflow/compiler/tf2xla/kernels/random_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/random_ops.cc
@@ -155,7 +155,8 @@ class RandomShuffleOp : public XlaOpKernel {
xla::XlaOp indices = xla::Iota(builder, xla::S32, n);
// Swap the indices at i and swaps[i].
- auto swap_body_fn = [&](xla::XlaOp i, gtl::ArraySlice<xla::XlaOp> loop_vars,
+ auto swap_body_fn = [&](xla::XlaOp i,
+ absl::Span<const xla::XlaOp> loop_vars,
xla::XlaBuilder* builder)
-> xla::StatusOr<std::vector<xla::XlaOp>> {
auto swaps = loop_vars[0];
diff --git a/tensorflow/compiler/tf2xla/kernels/select_op.cc b/tensorflow/compiler/tf2xla/kernels/select_op.cc
index d9578eca5b..9e4c57c9bf 100644
--- a/tensorflow/compiler/tf2xla/kernels/select_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/select_op.cc
@@ -66,7 +66,7 @@ class SelectOp : public XlaOpKernel {
// XLA. It seems we have to broadcast on the left and then Reshape
// to get the dimensions in the right order.
const auto dim_sizes = then_shape.dim_sizes();
- gtl::ArraySlice<int64> bdims = dim_sizes;
+ absl::Span<const int64> bdims = dim_sizes;
bdims.remove_prefix(1);
cond_handle = xla::Broadcast(cond_handle, bdims);
diff --git a/tensorflow/compiler/tf2xla/kernels/spacetobatch_op.cc b/tensorflow/compiler/tf2xla/kernels/spacetobatch_op.cc
index 7327258c31..b7b4f3a546 100644
--- a/tensorflow/compiler/tf2xla/kernels/spacetobatch_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/spacetobatch_op.cc
@@ -23,7 +23,7 @@ namespace {
void SpaceToBatch(XlaOpKernelContext* ctx, const xla::XlaOp& input,
DataType input_dtype, const TensorShape& input_tensor_shape,
- gtl::ArraySlice<int64> block_shape,
+ absl::Span<const int64> block_shape,
const xla::Literal& paddings) {
const int input_rank = input_tensor_shape.dims();
const gtl::InlinedVector<int64, 4> input_shape =
@@ -34,7 +34,7 @@ void SpaceToBatch(XlaOpKernelContext* ctx, const xla::XlaOp& input,
ctx, input_rank >= 1 + block_rank,
errors::InvalidArgument("input rank should be >= ", 1 + block_rank,
" instead of ", input_rank));
- gtl::ArraySlice<int64> remainder_shape(input_shape);
+ absl::Span<const int64> remainder_shape(input_shape);
remainder_shape.remove_prefix(1 + block_rank);
OP_REQUIRES(
diff --git a/tensorflow/compiler/tf2xla/kernels/tensor_array_ops.cc b/tensorflow/compiler/tf2xla/kernels/tensor_array_ops.cc
index be1814d8e3..bb114d1aed 100644
--- a/tensorflow/compiler/tf2xla/kernels/tensor_array_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/tensor_array_ops.cc
@@ -122,7 +122,7 @@ Status GetTensorArrayShape(const XlaResource* resource,
// relevant slice of 'operand'.
xla::XlaOp DynamicAddSlice(xla::XlaBuilder* builder, const xla::XlaOp& operand,
const xla::XlaOp& update,
- const gtl::ArraySlice<int64>& update_dims,
+ absl::Span<const int64> update_dims,
const xla::XlaOp& start_indices) {
xla::XlaOp current = xla::DynamicSlice(operand, start_indices, update_dims);
xla::XlaOp sum = xla::Add(current, update);
diff --git a/tensorflow/compiler/tf2xla/lib/cholesky.cc b/tensorflow/compiler/tf2xla/lib/cholesky.cc
index ff3de75ad2..c50a8de33e 100644
--- a/tensorflow/compiler/tf2xla/lib/cholesky.cc
+++ b/tensorflow/compiler/tf2xla/lib/cholesky.cc
@@ -64,7 +64,7 @@ xla::XlaOp CholeskyUnblocked(xla::XlaOp a,
xla::XlaOp l = xla::ZerosLike(a);
// Construct the for loop body to iterate over rows.
- auto body_fn = [&](xla::XlaOp i, gtl::ArraySlice<xla::XlaOp> loop_vars,
+ auto body_fn = [&](xla::XlaOp i, absl::Span<const xla::XlaOp> loop_vars,
xla::XlaBuilder* body_builder)
-> xla::StatusOr<std::vector<xla::XlaOp>> {
xla::Shape col_shape;
diff --git a/tensorflow/compiler/tf2xla/lib/qr.cc b/tensorflow/compiler/tf2xla/lib/qr.cc
index df2504a0f9..0a140fa93c 100644
--- a/tensorflow/compiler/tf2xla/lib/qr.cc
+++ b/tensorflow/compiler/tf2xla/lib/qr.cc
@@ -65,9 +65,9 @@ namespace {
// return (v, tau, beta)
// TODO(phawkins): LAPACK's xLARFG implementation has code for handling
// overflows in the norm/beta calculations. Perhaps do the same here.
-xla::Status House(xla::XlaOp x, xla::XlaOp k, gtl::ArraySlice<int64> batch_dims,
- const int64 m, xla::XlaOp* v, xla::XlaOp* tau,
- xla::XlaOp* beta) {
+xla::Status House(xla::XlaOp x, xla::XlaOp k,
+ absl::Span<const int64> batch_dims, const int64 m,
+ xla::XlaOp* v, xla::XlaOp* tau, xla::XlaOp* beta) {
xla::XlaBuilder* const builder = x.builder();
TF_ASSIGN_OR_RETURN(xla::Shape x_shape, builder->GetShape(x));
const xla::PrimitiveType type = x_shape.element_type();
@@ -173,7 +173,7 @@ xla::StatusOr<QRBlockResult> QRBlock(
std::iota(batch_dim_indices.begin(), batch_dim_indices.end(), 0);
auto qr_body_fn =
- [&](xla::XlaOp j, gtl::ArraySlice<xla::XlaOp> values,
+ [&](xla::XlaOp j, absl::Span<const xla::XlaOp> values,
xla::XlaBuilder* builder) -> xla::StatusOr<std::vector<xla::XlaOp>> {
auto a = values[0];
auto vs = values[1];
@@ -255,7 +255,7 @@ xla::StatusOr<QRBlockResult> QRBlock(
// There is no need to return Y since at termination of the loop it is equal to
// vs.
xla::StatusOr<xla::XlaOp> ComputeWYRepresentation(
- xla::PrimitiveType type, gtl::ArraySlice<int64> batch_dims, xla::XlaOp vs,
+ xla::PrimitiveType type, absl::Span<const int64> batch_dims, xla::XlaOp vs,
xla::XlaOp taus, int64 m, int64 n,
xla::PrecisionConfigProto::Precision precision) {
std::vector<int64> batch_dim_indices(batch_dims.size());
@@ -263,7 +263,7 @@ xla::StatusOr<xla::XlaOp> ComputeWYRepresentation(
int64 n_index = batch_dims.size() + 1;
auto body_fn =
- [&](xla::XlaOp j, gtl::ArraySlice<xla::XlaOp> values,
+ [&](xla::XlaOp j, absl::Span<const xla::XlaOp> values,
xla::XlaBuilder* builder) -> xla::StatusOr<std::vector<xla::XlaOp>> {
auto w = values[0];
auto y = values[1];
diff --git a/tensorflow/compiler/tf2xla/lib/scatter.cc b/tensorflow/compiler/tf2xla/lib/scatter.cc
index bafe5099f2..89df6683d9 100644
--- a/tensorflow/compiler/tf2xla/lib/scatter.cc
+++ b/tensorflow/compiler/tf2xla/lib/scatter.cc
@@ -40,9 +40,9 @@ xla::StatusOr<xla::XlaOp> XlaScatter(
TF_ASSIGN_OR_RETURN(xla::Shape buffer_shape, builder->GetShape(buffer));
TF_RETURN_IF_ERROR(builder->GetShape(updates).status());
TF_ASSIGN_OR_RETURN(xla::Shape indices_shape, builder->GetShape(indices));
- gtl::ArraySlice<int64> indices_dims =
+ absl::Span<const int64> indices_dims =
xla::AsInt64Slice(indices_shape.dimensions());
- gtl::ArraySlice<int64> buffer_dims =
+ absl::Span<const int64> buffer_dims =
xla::AsInt64Slice(buffer_shape.dimensions());
// If the indices are N-dimensional, the minor dimension of indices contains
@@ -107,7 +107,7 @@ xla::StatusOr<xla::XlaOp> XlaScatter(
// index = dynamic-slice(indices, i)
// update = dynamic-slice(updates, i)
// buffer = dynamic-update-slice(buffer, update, index)
- auto body_fn = [&](xla::XlaOp i, gtl::ArraySlice<xla::XlaOp> loop_vars,
+ auto body_fn = [&](xla::XlaOp i, absl::Span<const xla::XlaOp> loop_vars,
xla::XlaBuilder* body_builder) {
auto indices = loop_vars[0];
auto updates = loop_vars[1];
diff --git a/tensorflow/compiler/tf2xla/lib/util.cc b/tensorflow/compiler/tf2xla/lib/util.cc
index 24e5dbbc6d..c267848524 100644
--- a/tensorflow/compiler/tf2xla/lib/util.cc
+++ b/tensorflow/compiler/tf2xla/lib/util.cc
@@ -113,8 +113,8 @@ xla::XlaOp IntegerLiteral(xla::XlaBuilder* builder, xla::PrimitiveType type,
return xla::ConstantLiteral(builder, literal);
}
-xla::XlaOp SliceInMinorDims(xla::XlaOp x, gtl::ArraySlice<int64> start,
- gtl::ArraySlice<int64> end) {
+xla::XlaOp SliceInMinorDims(xla::XlaOp x, absl::Span<const int64> start,
+ absl::Span<const int64> end) {
xla::XlaBuilder* builder = x.builder();
return builder->ReportErrorOrReturn([&]() -> xla::StatusOr<xla::XlaOp> {
TF_RET_CHECK(start.size() == end.size());
@@ -144,8 +144,8 @@ xla::XlaOp SliceInMinorDims(xla::XlaOp x, gtl::ArraySlice<int64> start,
});
}
-std::vector<int64> ConcatVectors(gtl::ArraySlice<int64> xs,
- gtl::ArraySlice<int64> ys) {
+std::vector<int64> ConcatVectors(absl::Span<const int64> xs,
+ absl::Span<const int64> ys) {
std::vector<int64> output(xs.size() + ys.size());
std::copy(xs.begin(), xs.end(), output.begin());
std::copy(ys.begin(), ys.end(), output.begin() + xs.size());
@@ -153,8 +153,8 @@ std::vector<int64> ConcatVectors(gtl::ArraySlice<int64> xs,
}
xla::XlaOp DynamicSliceInMinorDims(xla::XlaOp x,
- gtl::ArraySlice<xla::XlaOp> starts,
- gtl::ArraySlice<int64> sizes) {
+ absl::Span<const xla::XlaOp> starts,
+ absl::Span<const int64> sizes) {
xla::XlaBuilder* builder = x.builder();
return builder->ReportErrorOrReturn([&]() -> xla::StatusOr<xla::XlaOp> {
TF_ASSIGN_OR_RETURN(xla::Shape shape, builder->GetShape(x));
@@ -173,7 +173,7 @@ xla::XlaOp DynamicSliceInMinorDims(xla::XlaOp x,
}
xla::XlaOp UpdateSlice(xla::XlaOp x, xla::XlaOp update,
- gtl::ArraySlice<int64> start) {
+ absl::Span<const int64> start) {
xla::XlaBuilder* builder = x.builder();
return builder->ReportErrorOrReturn([&]() -> xla::StatusOr<xla::XlaOp> {
// TODO(phawkins): make int64 work on all backends, remove the int32 cast.
@@ -191,7 +191,7 @@ xla::XlaOp UpdateSlice(xla::XlaOp x, xla::XlaOp update,
}
xla::XlaOp UpdateSliceInMinorDims(xla::XlaOp x, xla::XlaOp update,
- gtl::ArraySlice<int64> start) {
+ absl::Span<const int64> start) {
xla::XlaBuilder* builder = x.builder();
return builder->ReportErrorOrReturn([&]() -> xla::StatusOr<xla::XlaOp> {
TF_ASSIGN_OR_RETURN(xla::Shape shape, builder->GetShape(x));
@@ -206,13 +206,13 @@ xla::XlaOp UpdateSliceInMinorDims(xla::XlaOp x, xla::XlaOp update,
}
xla::XlaOp DynamicUpdateSliceInMinorDims(xla::XlaOp x, xla::XlaOp update,
- gtl::ArraySlice<xla::XlaOp> starts) {
+ absl::Span<const xla::XlaOp> starts) {
auto padded_starts = PrependZerosInMajorDims(x, starts);
return xla::DynamicUpdateSlice(x, update, padded_starts);
}
xla::XlaOp PrependZerosInMajorDims(xla::XlaOp x,
- gtl::ArraySlice<xla::XlaOp> starts) {
+ absl::Span<const xla::XlaOp> starts) {
xla::XlaBuilder* builder = x.builder();
return builder->ReportErrorOrReturn([&]() -> xla::StatusOr<xla::XlaOp> {
TF_ASSIGN_OR_RETURN(xla::Shape shape, builder->GetShape(x));
diff --git a/tensorflow/compiler/tf2xla/lib/util.h b/tensorflow/compiler/tf2xla/lib/util.h
index b4905c9528..bb8ab8b407 100644
--- a/tensorflow/compiler/tf2xla/lib/util.h
+++ b/tensorflow/compiler/tf2xla/lib/util.h
@@ -31,7 +31,7 @@ xla::XlaOp FloatLiteral(xla::XlaBuilder* builder, xla::PrimitiveType type,
// Makes a 1D tensor [0, ..., x, y] from two tensors x and y with zeros
// prepended until the array is length n_dims.
xla::XlaOp PrependZerosInMajorDims(xla::XlaOp x,
- gtl::ArraySlice<xla::XlaOp> starts);
+ absl::Span<const xla::XlaOp> starts);
// Returns an integer scalar constant of 'type' with 'value'.
// If 'type' is complex, returns a real value with zero imaginary component.
@@ -41,33 +41,33 @@ xla::XlaOp IntegerLiteral(xla::XlaBuilder* builder, xla::PrimitiveType type,
// Builds a vector of zeros of length rank(x) with the last values being
// those in `starts`.
xla::XlaOp PrependZerosInMajorDims(xla::XlaOp x,
- gtl::ArraySlice<xla::XlaOp> starts);
+ absl::Span<const xla::XlaOp> starts);
// Performs a slice in the minor dimensions of a Tensor.
-xla::XlaOp SliceInMinorDims(xla::XlaOp x, gtl::ArraySlice<int64> start,
- gtl::ArraySlice<int64> end);
+xla::XlaOp SliceInMinorDims(xla::XlaOp x, absl::Span<const int64> start,
+ absl::Span<const int64> end);
// Returns the concatenation of `xs` and `ys`.
-std::vector<int64> ConcatVectors(gtl::ArraySlice<int64> xs,
- gtl::ArraySlice<int64> ys);
+std::vector<int64> ConcatVectors(absl::Span<const int64> xs,
+ absl::Span<const int64> ys);
// Performs a dynamic slice in the minor dimensions of a Tensor.
xla::XlaOp DynamicSliceInMinorDims(xla::XlaOp x,
- gtl::ArraySlice<xla::XlaOp> starts,
- gtl::ArraySlice<int64> sizes);
+ absl::Span<const xla::XlaOp> starts,
+ absl::Span<const int64> sizes);
// Updates a slice of 'x', i.e.,
// x[start[0], ..., start[n]] = update
xla::XlaOp UpdateSlice(xla::XlaOp x, xla::XlaOp update,
- gtl::ArraySlice<int64> start);
+ absl::Span<const int64> start);
// Updates a slice of 'x', where 'start' contains a list of minor dimensions:
// x[..., start[0], ..., start[n]] = update
xla::XlaOp UpdateSliceInMinorDims(xla::XlaOp x, xla::XlaOp update,
- gtl::ArraySlice<int64> start);
+ absl::Span<const int64> start);
xla::XlaOp DynamicUpdateSliceInMinorDims(xla::XlaOp x, xla::XlaOp update,
- gtl::ArraySlice<xla::XlaOp> starts);
+ absl::Span<const xla::XlaOp> starts);
// Transposes a stack of matrices `x` by swapping the last two dimensions.
xla::XlaOp TransposeInMinorDims(xla::XlaOp x);
diff --git a/tensorflow/compiler/tf2xla/lib/while_loop.cc b/tensorflow/compiler/tf2xla/lib/while_loop.cc
index d64394f140..5300e2c878 100644
--- a/tensorflow/compiler/tf2xla/lib/while_loop.cc
+++ b/tensorflow/compiler/tf2xla/lib/while_loop.cc
@@ -24,7 +24,7 @@ namespace tensorflow {
xla::StatusOr<std::vector<xla::XlaOp>> XlaWhileLoop(
const LoopConditionFunction& condition_function,
const LoopBodyFunction& body_function,
- gtl::ArraySlice<xla::XlaOp> initial_values, StringPiece name,
+ absl::Span<const xla::XlaOp> initial_values, StringPiece name,
xla::XlaBuilder* builder) {
int arity = initial_values.size();
std::vector<xla::Shape> var_shapes;
@@ -84,15 +84,15 @@ xla::StatusOr<std::vector<xla::XlaOp>> XlaWhileLoop(
xla::StatusOr<std::vector<xla::XlaOp>> XlaForEachIndex(
int64 num_iterations, xla::PrimitiveType num_iterations_type,
const ForEachIndexBodyFunction& body_function,
- gtl::ArraySlice<xla::XlaOp> initial_values, StringPiece name,
+ absl::Span<const xla::XlaOp> initial_values, StringPiece name,
xla::XlaBuilder* builder) {
auto while_cond_fn =
- [&](gtl::ArraySlice<xla::XlaOp> values,
+ [&](absl::Span<const xla::XlaOp> values,
xla::XlaBuilder* cond_builder) -> xla::StatusOr<xla::XlaOp> {
return xla::Lt(values[0], IntegerLiteral(cond_builder, num_iterations_type,
num_iterations));
};
- auto while_body_fn = [&](gtl::ArraySlice<xla::XlaOp> values,
+ auto while_body_fn = [&](absl::Span<const xla::XlaOp> values,
xla::XlaBuilder* body_builder)
-> xla::StatusOr<std::vector<xla::XlaOp>> {
xla::XlaOp iteration = values[0];
diff --git a/tensorflow/compiler/tf2xla/lib/while_loop.h b/tensorflow/compiler/tf2xla/lib/while_loop.h
index 9493b1f109..ebaaaee636 100644
--- a/tensorflow/compiler/tf2xla/lib/while_loop.h
+++ b/tensorflow/compiler/tf2xla/lib/while_loop.h
@@ -29,14 +29,14 @@ namespace tensorflow {
// Function that builds a loop condition. Takes as input a sequence of input
// values, and returns a boolean value representing if the condition succeeds.
-typedef std::function<xla::StatusOr<xla::XlaOp>(gtl::ArraySlice<xla::XlaOp>,
+typedef std::function<xla::StatusOr<xla::XlaOp>(absl::Span<const xla::XlaOp>,
xla::XlaBuilder*)>
LoopConditionFunction;
// Function that builds a loop body. Takes as input a sequence of input values
// and returns a sequence of output values.
typedef std::function<xla::StatusOr<std::vector<xla::XlaOp>>(
- gtl::ArraySlice<xla::XlaOp>, xla::XlaBuilder*)>
+ absl::Span<const xla::XlaOp>, xla::XlaBuilder*)>
LoopBodyFunction;
// Helper function for building an XLA while loop, where the values carried by
@@ -50,7 +50,7 @@ typedef std::function<xla::StatusOr<std::vector<xla::XlaOp>>(
xla::StatusOr<std::vector<xla::XlaOp>> XlaWhileLoop(
const LoopConditionFunction& condition_function,
const LoopBodyFunction& body_function,
- gtl::ArraySlice<xla::XlaOp> initial_values, StringPiece name,
+ absl::Span<const xla::XlaOp> initial_values, StringPiece name,
xla::XlaBuilder* builder);
// Builds an XLA loop that repeats a computation `num_iterations` times.
@@ -59,13 +59,13 @@ xla::StatusOr<std::vector<xla::XlaOp>> XlaWhileLoop(
// (current iteration number, loop-carried values), and returns an updated
// vector of the loop-carried values.
typedef std::function<xla::StatusOr<std::vector<xla::XlaOp>>(
- xla::XlaOp, gtl::ArraySlice<xla::XlaOp>, xla::XlaBuilder*)>
+ xla::XlaOp, absl::Span<const xla::XlaOp>, xla::XlaBuilder*)>
ForEachIndexBodyFunction;
xla::StatusOr<std::vector<xla::XlaOp>> XlaForEachIndex(
int64 num_iterations, xla::PrimitiveType num_iterations_type,
const ForEachIndexBodyFunction& body_function,
- gtl::ArraySlice<xla::XlaOp> initial_values, StringPiece name,
+ absl::Span<const xla::XlaOp> initial_values, StringPiece name,
xla::XlaBuilder* builder);
} // namespace tensorflow
diff --git a/tensorflow/compiler/tf2xla/literal_util.cc b/tensorflow/compiler/tf2xla/literal_util.cc
index 77da1bf29c..20103ec3ae 100644
--- a/tensorflow/compiler/tf2xla/literal_util.cc
+++ b/tensorflow/compiler/tf2xla/literal_util.cc
@@ -49,9 +49,8 @@ Status HostTensorToMutableBorrowingLiteral(
return Status::OK();
}
-Status HostTensorsToBorrowingLiteralTuple(
- tensorflow::gtl::ArraySlice<Tensor> host_tensors,
- xla::BorrowingLiteral* literal) {
+Status HostTensorsToBorrowingLiteralTuple(absl::Span<const Tensor> host_tensors,
+ xla::BorrowingLiteral* literal) {
std::vector<const char*> buf_ptrs;
buf_ptrs.reserve(host_tensors.size());
std::vector<xla::Shape> tensor_shapes(host_tensors.size());
diff --git a/tensorflow/compiler/tf2xla/literal_util.h b/tensorflow/compiler/tf2xla/literal_util.h
index 09d6fa8116..b4e317cee1 100644
--- a/tensorflow/compiler/tf2xla/literal_util.h
+++ b/tensorflow/compiler/tf2xla/literal_util.h
@@ -43,9 +43,8 @@ Status HostTensorToMutableBorrowingLiteral(
// Returns a BorrowingLiteral tuple that utilizes the same underlying buffers
// owned by 'host_tensors'.
-Status HostTensorsToBorrowingLiteralTuple(
- tensorflow::gtl::ArraySlice<Tensor> host_tensors,
- xla::BorrowingLiteral* literal);
+Status HostTensorsToBorrowingLiteralTuple(absl::Span<const Tensor> host_tensors,
+ xla::BorrowingLiteral* literal);
// Copies 'literal' to freshly allocated 'host_tensor', which is allocated of
// type <target_type>.
diff --git a/tensorflow/compiler/tf2xla/literal_util_test.cc b/tensorflow/compiler/tf2xla/literal_util_test.cc
index a3404c2b3d..7dc16b5a46 100644
--- a/tensorflow/compiler/tf2xla/literal_util_test.cc
+++ b/tensorflow/compiler/tf2xla/literal_util_test.cc
@@ -28,7 +28,7 @@ TEST(LiteralUtil, LiteralToHostTensor) {
{
std::vector<int64> int64_values = {1, 2, 3};
std::unique_ptr<xla::Literal> int64_values_literal =
- xla::LiteralUtil::CreateR1(gtl::ArraySlice<int64>(int64_values));
+ xla::LiteralUtil::CreateR1(absl::Span<const int64>(int64_values));
Tensor host_tensor;
EXPECT_EQ("Cannot convert literal of type S64 to tensor of type int32",
LiteralToHostTensor(*int64_values_literal, DT_INT32, &host_tensor)
@@ -49,7 +49,7 @@ TEST(LiteralUtil, LiteralToHostTensor) {
Tensor host_tensor;
std::vector<int32> int32_values = {10, 11};
std::unique_ptr<xla::Literal> int32_values_literal =
- xla::LiteralUtil::CreateR1(gtl::ArraySlice<int32>(int32_values));
+ xla::LiteralUtil::CreateR1(absl::Span<const int32>(int32_values));
EXPECT_TRUE(
LiteralToHostTensor(*int32_values_literal, DT_INT32, &host_tensor)
.ok());
diff --git a/tensorflow/compiler/tf2xla/xla_compiler.cc b/tensorflow/compiler/tf2xla/xla_compiler.cc
index aa2a521d98..0c300c282e 100644
--- a/tensorflow/compiler/tf2xla/xla_compiler.cc
+++ b/tensorflow/compiler/tf2xla/xla_compiler.cc
@@ -835,8 +835,8 @@ Status XlaCompiler::GetDeviceToHostChannelHandle(const string& key,
namespace {
-void SetTransfer(const string& key, gtl::ArraySlice<DataType> types,
- gtl::ArraySlice<TensorShape> shapes,
+void SetTransfer(const string& key, absl::Span<const DataType> types,
+ absl::Span<const TensorShape> shapes,
tf2xla::HostTransferMetadata* transfer) {
transfer->set_key(key);
CHECK(types.size() == shapes.size());
@@ -850,8 +850,8 @@ void SetTransfer(const string& key, gtl::ArraySlice<DataType> types,
} // namespace
Status XlaCompiler::SetDeviceToHostMetadata(
- const string& key, gtl::ArraySlice<DataType> types,
- gtl::ArraySlice<TensorShape> shapes) {
+ const string& key, absl::Span<const DataType> types,
+ absl::Span<const TensorShape> shapes) {
if (host_compute_sends_.find(key) != host_compute_sends_.end()) {
return errors::InvalidArgument(
"Duplicate calls to SetDeviceToHostMetadata with key ", key);
@@ -877,8 +877,8 @@ Status XlaCompiler::GetDeviceToHostShapes(
}
Status XlaCompiler::SetHostToDeviceMetadata(
- const string& key, gtl::ArraySlice<DataType> types,
- gtl::ArraySlice<TensorShape> shapes) {
+ const string& key, absl::Span<const DataType> types,
+ absl::Span<const TensorShape> shapes) {
  if (host_compute_recvs_.find(key) != host_compute_recvs_.end()) {
return errors::InvalidArgument(
"Duplicate calls to SetHostToDeviceMetadata with key ", key);
diff --git a/tensorflow/compiler/tf2xla/xla_compiler.h b/tensorflow/compiler/tf2xla/xla_compiler.h
index 9e2c64fd42..8f4a9858ed 100644
--- a/tensorflow/compiler/tf2xla/xla_compiler.h
+++ b/tensorflow/compiler/tf2xla/xla_compiler.h
@@ -351,8 +351,8 @@ class XlaCompiler {
// Sets the shapes and types for the device to host transfer associated with
// 'key'.
Status SetDeviceToHostMetadata(const string& key,
- gtl::ArraySlice<DataType> types,
- gtl::ArraySlice<TensorShape> shapes);
+ absl::Span<const DataType> types,
+ absl::Span<const TensorShape> shapes);
  // Gets the shapes of the device to host transfer associated with 'key'.
Status GetDeviceToHostShapes(const string& key,
@@ -361,8 +361,8 @@ class XlaCompiler {
// Sets the shapes and types for the host to device transfer associated with
// 'key'.
Status SetHostToDeviceMetadata(const string& key,
- gtl::ArraySlice<DataType> types,
- gtl::ArraySlice<TensorShape> shapes);
+ absl::Span<const DataType> types,
+ absl::Span<const TensorShape> shapes);
// In order to avoid deadlocks from dependencies in host computations, it can
// be necessary to enforce a partial order on the execution of HostCompute
diff --git a/tensorflow/compiler/tf2xla/xla_helpers.cc b/tensorflow/compiler/tf2xla/xla_helpers.cc
index 8efb3d55c8..ea10ca6463 100644
--- a/tensorflow/compiler/tf2xla/xla_helpers.cc
+++ b/tensorflow/compiler/tf2xla/xla_helpers.cc
@@ -119,7 +119,7 @@ xla::XlaOp XlaHelpers::FloatLiteral(xla::XlaBuilder* b, DataType data_type,
}
/* static */ Status XlaHelpers::ReshapeLiteral(
- const xla::Literal& input, gtl::ArraySlice<int64> dimensions,
+ const xla::Literal& input, absl::Span<const int64> dimensions,
xla::Literal* output) {
if (xla::ShapeUtil::IsTuple(input.shape())) {
return errors::InvalidArgument("ReshapeLiteral does not support tuples.");
diff --git a/tensorflow/compiler/tf2xla/xla_helpers.h b/tensorflow/compiler/tf2xla/xla_helpers.h
index e6522157a5..e22b1f0f37 100644
--- a/tensorflow/compiler/tf2xla/xla_helpers.h
+++ b/tensorflow/compiler/tf2xla/xla_helpers.h
@@ -50,7 +50,7 @@ class XlaHelpers {
// Reshapes literal 'input' to have 'shape'. Both the original shape and
// 'shape' must contain the same number of elements.
static Status ReshapeLiteral(const xla::Literal& input,
- gtl::ArraySlice<int64> shape,
+ absl::Span<const int64> shape,
xla::Literal* output);
// Returns the argmax of `input` along `axis`. `output_type` is the type to
diff --git a/tensorflow/compiler/tf2xla/xla_op_kernel.cc b/tensorflow/compiler/tf2xla/xla_op_kernel.cc
index 9e8f5f2a1a..1499c99ed1 100644
--- a/tensorflow/compiler/tf2xla/xla_op_kernel.cc
+++ b/tensorflow/compiler/tf2xla/xla_op_kernel.cc
@@ -119,7 +119,7 @@ Status XlaOpKernelContext::ConstantInput(StringPiece name,
}
Status XlaOpKernelContext::ConstantInputReshaped(
- int index, gtl::ArraySlice<int64> new_dims,
+ int index, absl::Span<const int64> new_dims,
xla::Literal* constant_literal) {
const Tensor& tensor = context_->input(index);
TensorShape new_shape(new_dims);
diff --git a/tensorflow/compiler/tf2xla/xla_op_kernel.h b/tensorflow/compiler/tf2xla/xla_op_kernel.h
index 3e26ba4f01..45cfa7da74 100644
--- a/tensorflow/compiler/tf2xla/xla_op_kernel.h
+++ b/tensorflow/compiler/tf2xla/xla_op_kernel.h
@@ -113,7 +113,7 @@ class XlaOpKernelContext {
// cannot be evaluated, e.g., because it depends on unbound parameters,
// returns a non-Ok status. If InputShape(index).num_elements() !=
// new_shape.num_elements(), returns an error status.
- Status ConstantInputReshaped(int index, gtl::ArraySlice<int64> new_shape,
+ Status ConstantInputReshaped(int index, absl::Span<const int64> new_dims,
xla::Literal* constant_literal);
// Converts a constant scalar int32 or int64 tensor into an int64.
diff --git a/tensorflow/compiler/tf2xla/xla_op_registry.cc b/tensorflow/compiler/tf2xla/xla_op_registry.cc
index 2f3a4cd3b5..dae2d956ca 100644
--- a/tensorflow/compiler/tf2xla/xla_op_registry.cc
+++ b/tensorflow/compiler/tf2xla/xla_op_registry.cc
@@ -105,7 +105,7 @@ XlaOpRegistry::~XlaOpRegistry() = default;
/* static */ void XlaOpRegistry::RegisterBackend(
const string& compilation_device_name,
- gtl::ArraySlice<DataType> supported_types, BackendOpFilter op_filter) {
+ absl::Span<const DataType> supported_types, BackendOpFilter op_filter) {
XlaOpRegistry& registry = Instance();
mutex_lock lock(registry.mutex_);
auto result = registry.backends_.emplace(compilation_device_name, Backend());
@@ -382,7 +382,7 @@ XlaOpRegistrationBuilder XlaOpRegistrationBuilder::Name(StringPiece name) {
}
XlaOpRegistrationBuilder& XlaOpRegistrationBuilder::Device(
- gtl::ArraySlice<StringPiece> devices) {
+ absl::Span<const StringPiece> devices) {
registration_->has_device_whitelist = true;
for (StringPiece device : devices) {
registration_->device_whitelist.emplace(device);
@@ -415,7 +415,7 @@ XlaOpRegistrationBuilder& XlaOpRegistrationBuilder::TypeConstraint(
}
XlaOpRegistrationBuilder& XlaOpRegistrationBuilder::TypeConstraint(
- StringPiece attr_name, gtl::ArraySlice<DataType> allowed) {
+ StringPiece attr_name, absl::Span<const DataType> allowed) {
std::set<DataType>& types =
registration_->type_constraints[string(attr_name)];
for (DataType t : allowed) {
@@ -452,7 +452,7 @@ XlaOpRegistrar::XlaOpRegistrar(
}
XlaBackendRegistrar::XlaBackendRegistrar(
- StringPiece name, gtl::ArraySlice<DataType> types,
+ StringPiece name, absl::Span<const DataType> types,
XlaOpRegistry::BackendOpFilter op_filter) {
XlaOpRegistry& registry = XlaOpRegistry::Instance();
registry.RegisterBackend(string(name), types, op_filter);
diff --git a/tensorflow/compiler/tf2xla/xla_op_registry.h b/tensorflow/compiler/tf2xla/xla_op_registry.h
index 6ce0e2580b..c640842dc0 100644
--- a/tensorflow/compiler/tf2xla/xla_op_registry.h
+++ b/tensorflow/compiler/tf2xla/xla_op_registry.h
@@ -94,7 +94,7 @@ class XlaOpRegistry {
// the device; it may optionally modify the KernelDef.
typedef bool (*BackendOpFilter)(KernelDef* kdef);
static void RegisterBackend(const string& compilation_device_name,
- gtl::ArraySlice<DataType> supported_types,
+ absl::Span<const DataType> supported_types,
BackendOpFilter op_filter);
// Returns the names of the registered backends.
@@ -236,7 +236,7 @@ class XlaOpRegistrationBuilder {
// Specifies a whitelist of devices on which the operator may run.
XlaOpRegistrationBuilder& Device(StringPiece devices);
- XlaOpRegistrationBuilder& Device(gtl::ArraySlice<StringPiece> devices);
+ XlaOpRegistrationBuilder& Device(absl::Span<const StringPiece> devices);
// Specifies a type constraint for a type variable attribute. Each constraint
// specifies the set of types that the type variable may assume.
@@ -244,7 +244,7 @@ class XlaOpRegistrationBuilder {
DataType allowed);
XlaOpRegistrationBuilder& TypeConstraint(StringPiece attr_name,
- gtl::ArraySlice<DataType> allowed);
+ absl::Span<const DataType> allowed);
// Specifies that a dummy copy of this operator should not be registered on
// XLA_* devices, but may be used during compilation.
@@ -288,7 +288,7 @@ class XlaOpRegistrar {
class XlaBackendRegistrar {
public:
- XlaBackendRegistrar(StringPiece name, gtl::ArraySlice<DataType> types,
+ XlaBackendRegistrar(StringPiece name, absl::Span<const DataType> types,
XlaOpRegistry::BackendOpFilter op_filter = nullptr);
};
diff --git a/tensorflow/compiler/xla/array.h b/tensorflow/compiler/xla/array.h
index c8e483712e..0ec04f42a8 100644
--- a/tensorflow/compiler/xla/array.h
+++ b/tensorflow/compiler/xla/array.h
@@ -97,12 +97,11 @@ class Array {
using value_type = T;
// Creates a new array with the specified dimensions.
- explicit Array(tensorflow::gtl::ArraySlice<int64> sizes)
- : Array(sizes, T()) {}
+ explicit Array(absl::Span<const int64> sizes) : Array(sizes, T()) {}
// Creates a new array with the specified dimensions and specified value for
// every cell.
- Array(tensorflow::gtl::ArraySlice<int64> sizes, T value)
+ Array(absl::Span<const int64> sizes, T value)
: sizes_(sizes.begin(), sizes.end()), values_(new T[num_elements()]) {
Fill(value);
}
@@ -301,7 +300,7 @@ class Array {
// Invokes a callback with the (indices, value_ptr) for each cell in the
// array.
- void Each(std::function<void(tensorflow::gtl::ArraySlice<int64>, T*)> f) {
+ void Each(std::function<void(absl::Span<const int64>, T*)> f) {
std::vector<int64> index(sizes_.size());
for (int64 i = 0; i < num_elements(); ++i, next_index(&index)) {
f(index, &values_[i]);
@@ -309,8 +308,7 @@ class Array {
}
// Invokes a callback with the (indices, value) for each cell in the array.
- void Each(
- std::function<void(tensorflow::gtl::ArraySlice<int64>, T)> f) const {
+ void Each(std::function<void(absl::Span<const int64>, T)> f) const {
std::vector<int64> index(sizes_.size());
for (int64 i = 0; i < num_elements(); ++i, next_index(&index)) {
f(index, values_[i]);
@@ -320,8 +318,7 @@ class Array {
// Invokes a callback with the (indices, value_ptr) for each cell in the
  // array. If a callback returns a non-OK status, returns that status;
  // otherwise returns Status::OK().
- Status EachStatus(
- std::function<Status(tensorflow::gtl::ArraySlice<int64>, T*)> f) {
+ Status EachStatus(std::function<Status(absl::Span<const int64>, T*)> f) {
std::vector<int64> index(sizes_.size());
for (int64 i = 0; i < num_elements(); ++i, next_index(&index)) {
Status s = f(index, &values_[i]);
@@ -335,8 +332,7 @@ class Array {
// Invokes a callback with the (indices, value) for each cell in the array.
  // If a callback returns a non-OK status, returns that status; otherwise
  // returns Status::OK().
- Status EachStatus(
- std::function<Status(tensorflow::gtl::ArraySlice<int64>, T)> f) const {
+ Status EachStatus(std::function<Status(absl::Span<const int64>, T)> f) const {
std::vector<int64> index(sizes_.size());
for (int64 i = 0; i < num_elements(); ++i, next_index(&index)) {
Status s = f(index, values_[i]);
@@ -377,13 +373,13 @@ class Array {
// Returns the value at the cell specified by the indexes. The number of
  // arguments has to match the number of dimensions of the array.
- const T& operator()(tensorflow::gtl::ArraySlice<int64> indexes) const {
+ const T& operator()(absl::Span<const int64> indexes) const {
return values_[calculate_index(indexes)];
}
// Returns the value at the cell specified by the indexes. The number of
  // arguments has to match the number of dimensions of the array.
- T& operator()(tensorflow::gtl::ArraySlice<int64> indexes) {
+ T& operator()(absl::Span<const int64> indexes) {
return values_[calculate_index(indexes)];
}
@@ -438,8 +434,8 @@ class Array {
bool operator!=(const Array<T>& other) const { return !(*this == other); }
// Performs the equivalent of a slice operation on this array.
- Array<T> Slice(tensorflow::gtl::ArraySlice<int64> starts,
- tensorflow::gtl::ArraySlice<int64> limits) const {
+ Array<T> Slice(absl::Span<const int64> starts,
+ absl::Span<const int64> limits) const {
CHECK_EQ(starts.size(), num_dimensions());
CHECK_EQ(limits.size(), num_dimensions());
@@ -464,7 +460,7 @@ class Array {
// Performs the equivalent of a DynamicUpdateSlice in-place on this array.
void UpdateSlice(const Array<T>& from,
- tensorflow::gtl::ArraySlice<int64> start_indices) {
+ absl::Span<const int64> start_indices) {
CHECK_EQ(from.num_dimensions(), num_dimensions());
std::vector<int64> limit_indices;
std::transform(start_indices.begin(), start_indices.end(),
@@ -484,7 +480,7 @@ class Array {
// Performs an in-place reshape, modifying the dimensions but not the
// underlying data.
- void Reshape(tensorflow::gtl::ArraySlice<int64> new_dimensions) {
+ void Reshape(absl::Span<const int64> new_dimensions) {
int64 old_num_elements = num_elements();
sizes_ = std::vector<int64>(new_dimensions.begin(), new_dimensions.end());
CHECK_EQ(num_elements(), old_num_elements);
diff --git a/tensorflow/compiler/xla/array4d_test.cc b/tensorflow/compiler/xla/array4d_test.cc
index 927733ea1e..3ab67b128f 100644
--- a/tensorflow/compiler/xla/array4d_test.cc
+++ b/tensorflow/compiler/xla/array4d_test.cc
@@ -27,8 +27,7 @@ namespace {
// Given an Array4D and a 4-tuple index, computes the linear index into the
// array idx represents.
template <typename T>
-int64 Array4DLinearIndex(const Array4D<T>& arr,
- tensorflow::gtl::ArraySlice<int64> idx) {
+int64 Array4DLinearIndex(const Array4D<T>& arr, absl::Span<const int64> idx) {
EXPECT_EQ(4, idx.size());
return (idx[3] + idx[2] * arr.n4() + idx[1] * arr.n3() * arr.n4() +
idx[0] * arr.n2() * arr.n3() * arr.n4());
@@ -51,9 +50,8 @@ TEST(Array4dTest, FillCtor) {
EXPECT_EQ(fullof7.n3(), 4);
EXPECT_EQ(fullof7.n4(), 5);
- fullof7.Each([](tensorflow::gtl::ArraySlice<int64> idx, int* cell) {
- EXPECT_EQ(*cell, 7);
- });
+ fullof7.Each(
+ [](absl::Span<const int64> idx, int* cell) { EXPECT_EQ(*cell, 7); });
}
TEST(Array4dTest, ContainerCtor) {
@@ -69,7 +67,7 @@ TEST(Array4dTest, ContainerCtor) {
EXPECT_EQ(arr.n3(), 4);
EXPECT_EQ(arr.n4(), 5);
- arr.Each([&arr](tensorflow::gtl::ArraySlice<int64> idx, int* cell) {
+ arr.Each([&arr](absl::Span<const int64> idx, int* cell) {
EXPECT_EQ(*cell, Array4DLinearIndex(arr, idx));
});
}
@@ -129,21 +127,19 @@ TEST(Array3dTest, InitializerListCtorHalf) {
TEST(Array4dTest, Fill) {
Array4D<int> fullof7(2, 3, 4, 5, 7);
- fullof7.Each([](tensorflow::gtl::ArraySlice<int64> idx, int* cell) {
- EXPECT_EQ(*cell, 7);
- });
+ fullof7.Each(
+ [](absl::Span<const int64> idx, int* cell) { EXPECT_EQ(*cell, 7); });
fullof7.Fill(11);
- fullof7.Each([](tensorflow::gtl::ArraySlice<int64> idx, int* cell) {
- EXPECT_EQ(*cell, 11);
- });
+ fullof7.Each(
+ [](absl::Span<const int64> idx, int* cell) { EXPECT_EQ(*cell, 11); });
}
TEST(Array4dTest, FillWithMultiples) {
Array4D<float> arr(2, 3, 4, 5);
arr.FillWithMultiples(2.0f);
- arr.Each([&arr](tensorflow::gtl::ArraySlice<int64> idx, float* cell) {
+ arr.Each([&arr](absl::Span<const int64> idx, float* cell) {
EXPECT_EQ(*cell, 2.0f * Array4DLinearIndex(arr, idx));
});
}
diff --git a/tensorflow/compiler/xla/array_test.cc b/tensorflow/compiler/xla/array_test.cc
index e8356c9832..2d0ac98bd4 100644
--- a/tensorflow/compiler/xla/array_test.cc
+++ b/tensorflow/compiler/xla/array_test.cc
@@ -163,7 +163,7 @@ TEST(ArrayTest, Each) {
arr.FillWithMultiples(1);
int64 each_count = 0, each_sum = 0;
- arr.Each([&](tensorflow::gtl::ArraySlice<int64> idx, int cell) {
+ arr.Each([&](absl::Span<const int64> idx, int cell) {
int64 lin_idx = idx[0] * 12 + idx[1] * 4 + idx[2];
EXPECT_EQ(lin_idx, cell);
each_count++;
diff --git a/tensorflow/compiler/xla/client/client.cc b/tensorflow/compiler/xla/client/client.cc
index 1fdf8f6260..8818f81312 100644
--- a/tensorflow/compiler/xla/client/client.cc
+++ b/tensorflow/compiler/xla/client/client.cc
@@ -163,8 +163,7 @@ Status Client::ResetDevice() {
}
StatusOr<std::unique_ptr<Literal>> Client::ExecuteAndTransfer(
- const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments,
+ const XlaComputation& computation, absl::Span<GlobalData* const> arguments,
const ExecutionOptions* execution_options,
ExecutionProfile* execution_profile) {
TF_ASSIGN_OR_RETURN(
@@ -212,8 +211,7 @@ StatusOr<XlaComputation> Client::LoadSnapshot(const HloSnapshot& module) {
}
StatusOr<std::unique_ptr<GlobalData>> Client::Execute(
- const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments,
+ const XlaComputation& computation, absl::Span<GlobalData* const> arguments,
const ExecutionOptions* execution_options,
ExecutionProfile* execution_profile) {
ExecuteGraphRequest request;
@@ -252,7 +250,7 @@ StatusOr<std::unique_ptr<GlobalData>> Client::Execute(
}
StatusOr<std::vector<std::unique_ptr<GlobalData>>> Client::ExecuteParallel(
- tensorflow::gtl::ArraySlice<XlaComputationInstance> computations) {
+ absl::Span<const XlaComputationInstance> computations) {
ExecuteGraphParallelRequest request;
for (const XlaComputationInstance& computation : computations) {
diff --git a/tensorflow/compiler/xla/client/client.h b/tensorflow/compiler/xla/client/client.h
index be50cebfcc..940ddbedd1 100644
--- a/tensorflow/compiler/xla/client/client.h
+++ b/tensorflow/compiler/xla/client/client.h
@@ -53,7 +53,7 @@ class Client {
// will be filled with profile data from the execution.
StatusOr<std::unique_ptr<GlobalData>> Execute(
const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments,
+ absl::Span<GlobalData* const> arguments,
const ExecutionOptions* execution_options = nullptr,
ExecutionProfile* execution_profile = nullptr);
@@ -82,7 +82,7 @@ class Client {
// from each computation.
//
StatusOr<std::vector<std::unique_ptr<GlobalData>>> ExecuteParallel(
- tensorflow::gtl::ArraySlice<XlaComputationInstance> computations);
+ absl::Span<const XlaComputationInstance> computations);
// Requests device_count device handles available on the target. The returned
// device handles are used to specify the devices to execute the computations
@@ -134,7 +134,7 @@ class Client {
// Execute() and Transfer().
StatusOr<std::unique_ptr<Literal>> ExecuteAndTransfer(
const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments,
+ absl::Span<GlobalData* const> arguments,
const ExecutionOptions* execution_options = nullptr,
ExecutionProfile* execution_profile = nullptr);
diff --git a/tensorflow/compiler/xla/client/compile_only_client.cc b/tensorflow/compiler/xla/client/compile_only_client.cc
index 040344c9a6..a6c58cb175 100644
--- a/tensorflow/compiler/xla/client/compile_only_client.cc
+++ b/tensorflow/compiler/xla/client/compile_only_client.cc
@@ -23,7 +23,7 @@ namespace xla {
StatusOr<std::vector<std::unique_ptr<AotCompilationResult>>>
CompileOnlyClient::CompileAheadOfTime(
- const tensorflow::gtl::ArraySlice<AotXlaComputationInstance> computations,
+ const absl::Span<const AotXlaComputationInstance> computations,
const AotCompilationOptions& options,
std::unique_ptr<AotCompilationMetadata>* metadata) {
std::vector<CompileOnlyService::AotXlaComputationInstance> service_instances;
diff --git a/tensorflow/compiler/xla/client/compile_only_client.h b/tensorflow/compiler/xla/client/compile_only_client.h
index d0c83cbfcc..9e3ed23734 100644
--- a/tensorflow/compiler/xla/client/compile_only_client.h
+++ b/tensorflow/compiler/xla/client/compile_only_client.h
@@ -52,7 +52,7 @@ class CompileOnlyClient : public Client {
// code. |metadata|, if provided, is populated during compilation.
StatusOr<std::vector<std::unique_ptr<AotCompilationResult>>>
CompileAheadOfTime(
- const tensorflow::gtl::ArraySlice<AotXlaComputationInstance> computations,
+ const absl::Span<const AotXlaComputationInstance> computations,
const AotCompilationOptions& options,
std::unique_ptr<AotCompilationMetadata>* metadata = nullptr);
diff --git a/tensorflow/compiler/xla/client/executable_build_options.h b/tensorflow/compiler/xla/client/executable_build_options.h
index 888d2f28eb..93334db88b 100644
--- a/tensorflow/compiler/xla/client/executable_build_options.h
+++ b/tensorflow/compiler/xla/client/executable_build_options.h
@@ -86,7 +86,7 @@ class ExecutableBuildOptions {
void add_disabled_hlo_pass(absl::string_view pass_name) {
disabled_hlo_passes_.push_back(std::string(pass_name));
}
- const tensorflow::gtl::ArraySlice<std::string> disabled_hlo_passes() const {
+ const absl::Span<const std::string> disabled_hlo_passes() const {
return disabled_hlo_passes_;
}
diff --git a/tensorflow/compiler/xla/client/lib/math.cc b/tensorflow/compiler/xla/client/lib/math.cc
index e569610b85..d3d7edb42a 100644
--- a/tensorflow/compiler/xla/client/lib/math.cc
+++ b/tensorflow/compiler/xla/client/lib/math.cc
@@ -69,8 +69,7 @@ std::array<float, 6> kErfUCoefficient = {
// Evaluate the polynomial given coefficients and `x`.
// N.B. Coefficients should be supplied in decreasing order.
-XlaOp EvaluatePolynomial(XlaOp x,
- tensorflow::gtl::ArraySlice<float> coefficients) {
+XlaOp EvaluatePolynomial(XlaOp x, absl::Span<const float> coefficients) {
XlaOp poly = ScalarLike(x, 0.0);
for (float c : coefficients) {
poly = poly * x + ScalarLike(x, c);
diff --git a/tensorflow/compiler/xla/client/lib/math.h b/tensorflow/compiler/xla/client/lib/math.h
index 13db232556..a6cafd4207 100644
--- a/tensorflow/compiler/xla/client/lib/math.h
+++ b/tensorflow/compiler/xla/client/lib/math.h
@@ -34,8 +34,7 @@ XlaOp Reciprocal(XlaOp operand);
// Evaluates a polynomial given coefficients and `x`.
// N.B. Coefficients should be supplied in decreasing order.
-XlaOp EvaluatePolynomial(XlaOp x,
- tensorflow::gtl::ArraySlice<float> coefficients);
+XlaOp EvaluatePolynomial(XlaOp x, absl::Span<const float> coefficients);
// Computes an approximation of the error function complement (1 - erf(x)).
XlaOp Erfc(XlaOp x);
diff --git a/tensorflow/compiler/xla/client/lib/numeric.cc b/tensorflow/compiler/xla/client/lib/numeric.cc
index 7f90d6c197..a7a89481c9 100644
--- a/tensorflow/compiler/xla/client/lib/numeric.cc
+++ b/tensorflow/compiler/xla/client/lib/numeric.cc
@@ -39,7 +39,7 @@ XlaOp GetMatrixDiagonal(XlaOp x) {
TF_RET_CHECK(n_dims >= 2);
const int64 m = shape.dimensions(n_dims - 2);
const int64 n = shape.dimensions(n_dims - 1);
- tensorflow::gtl::ArraySlice<int64> major_dims =
+ absl::Span<const int64> major_dims =
AsInt64Slice(shape.dimensions()).subspan(/*pos=*/0, /*len=*/n_dims - 2);
auto a = Iota(builder, U32, n);
auto b = Iota(builder, U32, m);
@@ -66,7 +66,7 @@ XlaOp Triangle(XlaOp x, bool lower) {
TF_RET_CHECK(n_dims >= 2);
const int64 m = shape.dimensions(n_dims - 2);
const int64 n = shape.dimensions(n_dims - 1);
- tensorflow::gtl::ArraySlice<int64> major_dims =
+ absl::Span<const int64> major_dims =
AsInt64Slice(shape.dimensions()).subspan(/*pos=*/0, /*len=*/n_dims - 2);
auto a = Iota(builder, U32, n);
auto b = Iota(builder, U32, m);
diff --git a/tensorflow/compiler/xla/client/lib/pooling.cc b/tensorflow/compiler/xla/client/lib/pooling.cc
index 3ae9ae36f6..1979c867a4 100644
--- a/tensorflow/compiler/xla/client/lib/pooling.cc
+++ b/tensorflow/compiler/xla/client/lib/pooling.cc
@@ -26,11 +26,9 @@ namespace {
// element of an image by the count of elements that contributed to that
// element during pooling.
XlaOp AvgPoolDivideByCountWithGeneralPadding(
- XlaOp sums, PrimitiveType dtype,
- tensorflow::gtl::ArraySlice<int64> input_shape,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> spatial_padding,
- tensorflow::gtl::ArraySlice<int64> ksize,
- tensorflow::gtl::ArraySlice<int64> stride,
+ XlaOp sums, PrimitiveType dtype, absl::Span<const int64> input_shape,
+ absl::Span<const std::pair<int64, int64>> spatial_padding,
+ absl::Span<const int64> ksize, absl::Span<const int64> stride,
const TensorFormat& data_format) {
// The padding shouldn't be included in the counts. We use another
// ReduceWindow to find the right counts.
@@ -73,8 +71,8 @@ XlaOp AvgPoolDivideByCountWithGeneralPadding(
// Sums all elements in the window specified by 'kernel_size' and 'stride'.
XlaOp ComputeSums(XlaOp operand, XlaOp init_value,
- tensorflow::gtl::ArraySlice<int64> kernel_size,
- tensorflow::gtl::ArraySlice<int64> stride,
+ absl::Span<const int64> kernel_size,
+ absl::Span<const int64> stride,
const TensorFormat& data_format) {
XlaBuilder* b = operand.builder();
return b->ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
@@ -89,8 +87,8 @@ XlaOp ComputeSums(XlaOp operand, XlaOp init_value,
// Creates a padding configuration out of spatial padding values.
PaddingConfig MakeSpatialPaddingConfig(
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> spatial_padding,
- int num_spatial_dims, tensorflow::gtl::ArraySlice<int64> stride,
+ absl::Span<const std::pair<int64, int64>> spatial_padding,
+ int num_spatial_dims, absl::Span<const int64> stride,
const TensorFormat& data_format) {
PaddingConfig padding_config;
for (int i = 0; i < 2 + num_spatial_dims; ++i) {
@@ -107,13 +105,12 @@ PaddingConfig MakeSpatialPaddingConfig(
return padding_config;
}
-XlaOp AvgPoolDivideByCount(
- XlaOp pooled, tensorflow::gtl::ArraySlice<int64> input_size,
- tensorflow::gtl::ArraySlice<int64> window_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
- PrimitiveType dtype, const TensorFormat& data_format,
- bool counts_include_padding) {
+XlaOp AvgPoolDivideByCount(XlaOp pooled, absl::Span<const int64> input_size,
+ absl::Span<const int64> window_dimensions,
+ absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding,
+ PrimitiveType dtype, const TensorFormat& data_format,
+ bool counts_include_padding) {
if (counts_include_padding) {
// If counts include padding, all windows have the same number of elements
// contributing to each average. Divide by the window size everywhere to get
@@ -133,8 +130,8 @@ XlaOp AvgPoolDivideByCount(
} // namespace
-XlaOp MaxPool(XlaOp operand, tensorflow::gtl::ArraySlice<int64> kernel_size,
- tensorflow::gtl::ArraySlice<int64> stride, Padding padding,
+XlaOp MaxPool(XlaOp operand, absl::Span<const int64> kernel_size,
+ absl::Span<const int64> stride, Padding padding,
const TensorFormat& data_format) {
XlaBuilder* b = operand.builder();
return b->ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
@@ -147,9 +144,9 @@ XlaOp MaxPool(XlaOp operand, tensorflow::gtl::ArraySlice<int64> kernel_size,
});
}
-XlaOp AvgPool(XlaOp operand, tensorflow::gtl::ArraySlice<int64> kernel_size,
- tensorflow::gtl::ArraySlice<int64> stride,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
+XlaOp AvgPool(XlaOp operand, absl::Span<const int64> kernel_size,
+ absl::Span<const int64> stride,
+ absl::Span<const std::pair<int64, int64>> padding,
const TensorFormat& data_format,
const bool counts_include_padding) {
XlaBuilder* b = operand.builder();
@@ -173,9 +170,8 @@ XlaOp AvgPool(XlaOp operand, tensorflow::gtl::ArraySlice<int64> kernel_size,
}
std::vector<std::pair<int64, int64>> MakeSpatialPadding(
- tensorflow::gtl::ArraySlice<int64> input_size,
- tensorflow::gtl::ArraySlice<int64> kernel_size,
- tensorflow::gtl::ArraySlice<int64> stride, Padding padding,
+ absl::Span<const int64> input_size, absl::Span<const int64> kernel_size,
+ absl::Span<const int64> stride, Padding padding,
const TensorFormat& data_format) {
const int num_spatial_dims = kernel_size.size() - 2;
std::vector<int64> input_spatial_dimensions;
@@ -193,12 +189,12 @@ std::vector<std::pair<int64, int64>> MakeSpatialPadding(
stride_spatial_dimensions, padding);
}
-XlaOp AvgPoolGrad(
- XlaOp out_backprop, tensorflow::gtl::ArraySlice<int64> gradients_size,
- tensorflow::gtl::ArraySlice<int64> kernel_size,
- tensorflow::gtl::ArraySlice<int64> stride,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> spatial_padding,
- const TensorFormat& data_format, const bool counts_include_padding) {
+XlaOp AvgPoolGrad(XlaOp out_backprop, absl::Span<const int64> gradients_size,
+ absl::Span<const int64> kernel_size,
+ absl::Span<const int64> stride,
+ absl::Span<const std::pair<int64, int64>> spatial_padding,
+ const TensorFormat& data_format,
+ const bool counts_include_padding) {
XlaBuilder* b = out_backprop.builder();
return b->ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
const int num_dims = kernel_size.size();
diff --git a/tensorflow/compiler/xla/client/lib/pooling.h b/tensorflow/compiler/xla/client/lib/pooling.h
index 291c711a00..5c0054857d 100644
--- a/tensorflow/compiler/xla/client/lib/pooling.h
+++ b/tensorflow/compiler/xla/client/lib/pooling.h
@@ -25,7 +25,7 @@ namespace xla {
class TensorFormat {
public:
TensorFormat(int batch_dimension, int feature_dimension,
- tensorflow::gtl::ArraySlice<int64> spatial_dimensions)
+ absl::Span<const int64> spatial_dimensions)
: batch_dimension_(batch_dimension),
feature_dimension_(feature_dimension),
spatial_dimensions_(spatial_dimensions.begin(),
@@ -49,32 +49,31 @@ class TensorFormat {
};
// Computes the max pool of 'operand'.
-XlaOp MaxPool(XlaOp operand, tensorflow::gtl::ArraySlice<int64> kernel_size,
- tensorflow::gtl::ArraySlice<int64> stride, Padding padding,
+XlaOp MaxPool(XlaOp operand, absl::Span<const int64> kernel_size,
+ absl::Span<const int64> stride, Padding padding,
const TensorFormat& data_format);
// Computes the average pool of 'operand'.
-XlaOp AvgPool(XlaOp operand, tensorflow::gtl::ArraySlice<int64> kernel_size,
- tensorflow::gtl::ArraySlice<int64> stride,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
+XlaOp AvgPool(XlaOp operand, absl::Span<const int64> kernel_size,
+ absl::Span<const int64> stride,
+ absl::Span<const std::pair<int64, int64>> padding,
const TensorFormat& data_format,
const bool counts_include_padding);
// Returns the list of low and high padding elements in each spatial dimension
// for the given 'padding' specification.
std::vector<std::pair<int64, int64>> MakeSpatialPadding(
- tensorflow::gtl::ArraySlice<int64> input_size,
- tensorflow::gtl::ArraySlice<int64> kernel_size,
- tensorflow::gtl::ArraySlice<int64> stride, Padding padding,
+ absl::Span<const int64> input_size, absl::Span<const int64> kernel_size,
+ absl::Span<const int64> stride, Padding padding,
const TensorFormat& data_format);
// Computes the average pool gradient.
-XlaOp AvgPoolGrad(
- XlaOp out_backprop, tensorflow::gtl::ArraySlice<int64> gradients_size,
- tensorflow::gtl::ArraySlice<int64> kernel_size,
- tensorflow::gtl::ArraySlice<int64> stride,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> spatial_padding,
- const TensorFormat& data_format, const bool counts_include_padding);
+XlaOp AvgPoolGrad(XlaOp out_backprop, absl::Span<const int64> gradients_size,
+ absl::Span<const int64> kernel_size,
+ absl::Span<const int64> stride,
+ absl::Span<const std::pair<int64, int64>> spatial_padding,
+ const TensorFormat& data_format,
+ const bool counts_include_padding);
} // namespace xla
diff --git a/tensorflow/compiler/xla/client/lib/pooling_test.cc b/tensorflow/compiler/xla/client/lib/pooling_test.cc
index 1890047918..30adb9b1ad 100644
--- a/tensorflow/compiler/xla/client/lib/pooling_test.cc
+++ b/tensorflow/compiler/xla/client/lib/pooling_test.cc
@@ -32,8 +32,8 @@ TensorFormat MakeNCHWFormat(int num_spatial_dims) {
}
std::vector<std::pair<int64, int64>> MakeGeneralPadding(
- XlaOp input, tensorflow::gtl::ArraySlice<int64> kernel_size,
- tensorflow::gtl::ArraySlice<int64> stride, Padding padding,
+ XlaOp input, absl::Span<const int64> kernel_size,
+ absl::Span<const int64> stride, Padding padding,
const xla::TensorFormat& data_format) {
XlaBuilder* b = input.builder();
Shape operand_shape = b->GetShape(input).ValueOrDie();
@@ -46,7 +46,7 @@ std::vector<std::pair<int64, int64>> MakeGeneralPadding(
// Add singleton batch and feature dimensions to spatial dimensions, according
// to 'data_format' specification.
std::vector<int64> ExpandWithBatchAndFeatureDimensions(
- tensorflow::gtl::ArraySlice<int64> spatial_dim_sizes,
+ absl::Span<const int64> spatial_dim_sizes,
const xla::TensorFormat& data_format) {
const int num_spatial_dims = spatial_dim_sizes.size();
std::vector<int64> tensor_sizes(num_spatial_dims + 2, 1);
diff --git a/tensorflow/compiler/xla/client/local_client.cc b/tensorflow/compiler/xla/client/local_client.cc
index db7a8fc047..4402ba8762 100644
--- a/tensorflow/compiler/xla/client/local_client.cc
+++ b/tensorflow/compiler/xla/client/local_client.cc
@@ -51,7 +51,7 @@ LocalExecutable::LocalExecutable(std::unique_ptr<Executable> executable,
}
Status LocalExecutable::ValidateExecutionOptions(
- const tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments,
+ const absl::Span<const ShapedBuffer* const> arguments,
const ExecutableRunOptions& run_options, const Backend& backend) {
const ComputationLayout& computation_layout =
executable_->module_config().entry_computation_layout();
@@ -140,7 +140,7 @@ Status LocalExecutable::ValidateExecutionOptions(
}
StatusOr<ScopedShapedBuffer> LocalExecutable::Run(
- const tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments,
+ const absl::Span<const ShapedBuffer* const> arguments,
ExecutableRunOptions run_options) {
TF_RETURN_IF_ERROR(
ValidateExecutionOptions(arguments, run_options, *backend_));
@@ -177,7 +177,7 @@ StatusOr<ScopedShapedBuffer> LocalExecutable::Run(
StatusOr<ScopedShapedBuffer> LocalExecutable::ExecuteAndDump(
const ServiceExecutableRunOptions* run_options,
- const tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments) {
+ const absl::Span<const ShapedBuffer* const> arguments) {
executable_->hlo_snapshot()->set_execution_platform(
backend_->platform()->Name());
TF_RETURN_IF_ERROR(RecordArguments(arguments, executable_->hlo_snapshot()));
@@ -191,7 +191,7 @@ StatusOr<ScopedShapedBuffer> LocalExecutable::ExecuteAndDump(
}
Status LocalExecutable::RecordArguments(
- const tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments,
+ const absl::Span<const ShapedBuffer* const> arguments,
HloSnapshot* hlo_snapshot) {
hlo_snapshot->clear_arguments();
for (const ShapedBuffer* argument : arguments) {
@@ -245,7 +245,7 @@ Backend* LocalClient::mutable_backend() {
StatusOr<std::unique_ptr<LocalExecutable>> LocalClient::Compile(
const XlaComputation& computation,
- const tensorflow::gtl::ArraySlice<const Shape*> argument_layouts,
+ const absl::Span<const Shape* const> argument_layouts,
const ExecutableBuildOptions& options) {
ExecutableBuildOptions updated_options = options;
if (options.device_ordinal() == -1) {
diff --git a/tensorflow/compiler/xla/client/local_client.h b/tensorflow/compiler/xla/client/local_client.h
index ae23809261..78a1310c55 100644
--- a/tensorflow/compiler/xla/client/local_client.h
+++ b/tensorflow/compiler/xla/client/local_client.h
@@ -40,7 +40,7 @@ class LocalExecutable {
// Run the compiled computation with the given arguments and options and
// return the result.
StatusOr<ScopedShapedBuffer> Run(
- const tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments,
+ const absl::Span<const ShapedBuffer* const> arguments,
ExecutableRunOptions run_options);
// Return the options used to build the executable.
@@ -63,7 +63,7 @@ class LocalExecutable {
// The given ExecutableRunOptions override any values from legacy_flags
// (TF_XLA_FLAGS environment variable).
Status ValidateExecutionOptions(
- const tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments,
+ const absl::Span<const ShapedBuffer* const> arguments,
const ExecutableRunOptions& run_options, const Backend& backend);
// Records the computation in a SessionModule proto with the arguments used to
@@ -73,13 +73,12 @@ class LocalExecutable {
// (TF_XLA_FLAGS environment variable).
StatusOr<ScopedShapedBuffer> ExecuteAndDump(
const ServiceExecutableRunOptions* run_options,
- const tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments);
+ const absl::Span<const ShapedBuffer* const> arguments);
// Records the arguments used to invoke the computation in a SessionModule
// proto.
- Status RecordArguments(
- const tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments,
- HloSnapshot* hlo_snapshot);
+ Status RecordArguments(const absl::Span<const ShapedBuffer* const> arguments,
+ HloSnapshot* hlo_snapshot);
// Records the result of the computation in a SessionModule proto.
Status RecordResult(const ShapedBuffer* result, HloSnapshot* hlo_snapshot);
@@ -120,7 +119,7 @@ class LocalClient : public Client {
// (TF_XLA_FLAGS environment variable).
StatusOr<std::unique_ptr<LocalExecutable>> Compile(
const XlaComputation& computation,
- const tensorflow::gtl::ArraySlice<const Shape*> argument_layouts,
+ const absl::Span<const Shape* const> argument_layouts,
const ExecutableBuildOptions& options);
// Copy the literal data to the device with the given ordinal and return as a
diff --git a/tensorflow/compiler/xla/client/padding.cc b/tensorflow/compiler/xla/client/padding.cc
index ed4dc8e9f6..992b13139c 100644
--- a/tensorflow/compiler/xla/client/padding.cc
+++ b/tensorflow/compiler/xla/client/padding.cc
@@ -23,10 +23,9 @@ limitations under the License.
namespace xla {
-Status ValidatePaddingValues(
- tensorflow::gtl::ArraySlice<int64> input_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_strides) {
+Status ValidatePaddingValues(absl::Span<const int64> input_dimensions,
+ absl::Span<const int64> window_dimensions,
+ absl::Span<const int64> window_strides) {
bool ok = input_dimensions.size() == window_dimensions.size() &&
input_dimensions.size() == window_strides.size();
if (!ok) {
@@ -40,9 +39,9 @@ Status ValidatePaddingValues(
}
std::vector<std::pair<int64, int64>> MakePadding(
- tensorflow::gtl::ArraySlice<int64> input_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_strides, Padding padding) {
+ absl::Span<const int64> input_dimensions,
+ absl::Span<const int64> window_dimensions,
+ absl::Span<const int64> window_strides, Padding padding) {
TF_CHECK_OK(ValidatePaddingValues(input_dimensions, window_dimensions,
window_strides));
std::vector<std::pair<int64, int64>> low_high_padding;
diff --git a/tensorflow/compiler/xla/client/padding.h b/tensorflow/compiler/xla/client/padding.h
index e23b0b3a90..d5bd26cafb 100644
--- a/tensorflow/compiler/xla/client/padding.h
+++ b/tensorflow/compiler/xla/client/padding.h
@@ -41,10 +41,9 @@ enum class Padding {
// Validates that the slices are acceptable for determining padding -- this can
// be used to check the preconditions of MakePadding below to produce an error
// message that can be returned to the user.
-Status ValidatePaddingValues(
- tensorflow::gtl::ArraySlice<int64> input_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_strides);
+Status ValidatePaddingValues(absl::Span<const int64> input_dimensions,
+ absl::Span<const int64> window_dimensions,
+ absl::Span<const int64> window_strides);
// Returns the padding needed for the base area, given the base area dimensions,
// window dimensions, strides, and the type of padding.
@@ -58,9 +57,9 @@ Status ValidatePaddingValues(
// window_dimensions, and strides must match, which is equal to the number
// of elements in the result vector.
std::vector<std::pair<int64, int64>> MakePadding(
- tensorflow::gtl::ArraySlice<int64> input_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_strides, Padding padding);
+ absl::Span<const int64> input_dimensions,
+ absl::Span<const int64> window_dimensions,
+ absl::Span<const int64> window_strides, Padding padding);
} // namespace xla
diff --git a/tensorflow/compiler/xla/client/xla_builder.cc b/tensorflow/compiler/xla/client/xla_builder.cc
index 531b8dd66b..e639028ccd 100644
--- a/tensorflow/compiler/xla/client/xla_builder.cc
+++ b/tensorflow/compiler/xla/client/xla_builder.cc
@@ -90,7 +90,7 @@ StatusOr<Shape> XlaBuilder::GetShape(const XlaOp& op) const {
}
StatusOr<std::vector<Shape>> XlaBuilder::GetOperandShapes(
- tensorflow::gtl::ArraySlice<XlaOp> operands) const {
+ absl::Span<const XlaOp> operands) const {
std::vector<Shape> operand_shapes;
for (const XlaOp& operand : operands) {
TF_ASSIGN_OR_RETURN(const Shape& shape, GetShape(operand));
@@ -291,7 +291,7 @@ StatusOr<XlaComputation> XlaBuilder::Build(int64 root_id) {
StatusOr<XlaOp> XlaBuilder::InDimBroadcast(
const Shape& shape, const XlaOp& operand,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
TF_RETURN_IF_ERROR(first_error_);
HloInstructionProto instr;
@@ -352,9 +352,8 @@ XlaOp XlaBuilder::UnaryOp(HloOpcode unop, const XlaOp& operand) {
});
}
-XlaOp XlaBuilder::BinaryOp(
- HloOpcode binop, const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+XlaOp XlaBuilder::BinaryOp(HloOpcode binop, const XlaOp& lhs, const XlaOp& rhs,
+ absl::Span<const int64> broadcast_dimensions) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape& lhs_shape, GetShape(lhs));
@@ -448,12 +447,12 @@ XlaOp XlaBuilder::TernaryOp(HloOpcode triop, const XlaOp& lhs, const XlaOp& rhs,
}
XlaOp XlaBuilder::Add(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return BinaryOp(HloOpcode::kAdd, lhs, rhs, broadcast_dimensions);
}
XlaOp XlaBuilder::Mul(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return BinaryOp(HloOpcode::kMultiply, lhs, rhs, broadcast_dimensions);
}
@@ -480,7 +479,7 @@ XlaOp XlaBuilder::Iota(PrimitiveType type, int64 size) {
}
XlaOp XlaBuilder::Call(const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<XlaOp> operands) {
+ absl::Span<const XlaOp> operands) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
std::vector<const Shape*> operand_shape_ptrs;
@@ -515,8 +514,8 @@ XlaOp XlaBuilder::Parameter(int64 parameter_number, const Shape& shape,
});
}
-XlaOp XlaBuilder::Broadcast(
- const XlaOp& operand, tensorflow::gtl::ArraySlice<int64> broadcast_sizes) {
+XlaOp XlaBuilder::Broadcast(const XlaOp& operand,
+ absl::Span<const int64> broadcast_sizes) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape& operand_shape, GetShape(operand));
TF_ASSIGN_OR_RETURN(
@@ -541,7 +540,7 @@ XlaOp XlaBuilder::Broadcast(
XlaOp XlaBuilder::BroadcastInDim(
const XlaOp& operand, const Shape& shape,
- const tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ const absl::Span<const int64> broadcast_dimensions) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
return InDimBroadcast(shape, operand, broadcast_dimensions);
});
@@ -556,9 +555,9 @@ StatusOr<XlaOp> XlaBuilder::Reshape(const Shape& shape, const XlaOp& operand) {
}
XlaOp XlaBuilder::Slice(const XlaOp& operand,
- tensorflow::gtl::ArraySlice<int64> start_indices,
- tensorflow::gtl::ArraySlice<int64> limit_indices,
- tensorflow::gtl::ArraySlice<int64> strides) {
+ absl::Span<const int64> start_indices,
+ absl::Span<const int64> limit_indices,
+ absl::Span<const int64> strides) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape& operand_shape, GetShape(operand));
@@ -593,7 +592,7 @@ XlaOp XlaBuilder::SliceInDim(const XlaOp& operand, int64 start_index,
}
XlaOp XlaBuilder::DynamicSlice(const XlaOp& operand, const XlaOp& start_indices,
- tensorflow::gtl::ArraySlice<int64> slice_sizes) {
+ absl::Span<const int64> slice_sizes) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
@@ -631,7 +630,7 @@ XlaOp XlaBuilder::DynamicUpdateSlice(const XlaOp& operand, const XlaOp& update,
});
}
-XlaOp XlaBuilder::ConcatInDim(tensorflow::gtl::ArraySlice<XlaOp> operands,
+XlaOp XlaBuilder::ConcatInDim(absl::Span<const XlaOp> operands,
int64 dimension) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
@@ -671,8 +670,8 @@ XlaOp XlaBuilder::Pad(const XlaOp& operand, const XlaOp& padding_value,
}
XlaOp XlaBuilder::Reshape(const XlaOp& operand,
- tensorflow::gtl::ArraySlice<int64> dimensions,
- tensorflow::gtl::ArraySlice<int64> new_sizes) {
+ absl::Span<const int64> dimensions,
+ absl::Span<const int64> new_sizes) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape& operand_shape, GetShape(operand));
TF_ASSIGN_OR_RETURN(const Shape& shape,
@@ -686,7 +685,7 @@ XlaOp XlaBuilder::Reshape(const XlaOp& operand,
}
XlaOp XlaBuilder::Reshape(const XlaOp& operand,
- tensorflow::gtl::ArraySlice<int64> new_sizes) {
+ absl::Span<const int64> new_sizes) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(auto shape, GetShape(operand));
std::vector<int64> dimensions(shape.dimensions_size());
@@ -696,7 +695,7 @@ XlaOp XlaBuilder::Reshape(const XlaOp& operand,
}
XlaOp XlaBuilder::Collapse(const XlaOp& operand,
- tensorflow::gtl::ArraySlice<int64> dimensions) {
+ absl::Span<const int64> dimensions) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
if (dimensions.size() <= 1) {
// Not collapsing anything, trivially we can return the operand versus
@@ -706,8 +705,7 @@ XlaOp XlaBuilder::Collapse(const XlaOp& operand,
// Out-of-order collapse is not supported.
// Checks that the collapsed dimensions are in order and consecutive.
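// For example (illustrative, not part of this change): collapsing
// dimensions {1, 2, 3} of a rank-4 operand passes this check, while
// {1, 3} is rejected because dimension 2 is skipped.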
- for (tensorflow::gtl::ArraySlice<int64>::size_type i = 1;
- i < dimensions.size(); ++i) {
+ for (absl::Span<const int64>::size_type i = 1; i < dimensions.size(); ++i) {
if (dimensions[i] - 1 != dimensions[i - 1]) {
return InvalidArgument(
"Collapsed dimensions are not in consecutive order.");
@@ -758,7 +756,7 @@ XlaOp XlaBuilder::Select(const XlaOp& pred, const XlaOp& on_true,
});
}
-XlaOp XlaBuilder::Tuple(tensorflow::gtl::ArraySlice<XlaOp> elements) {
+XlaOp XlaBuilder::Tuple(absl::Span<const XlaOp> elements) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
std::vector<const Shape*> operand_shape_ptrs;
@@ -792,32 +790,32 @@ XlaOp XlaBuilder::GetTupleElement(const XlaOp& tuple_data, int64 index) {
}
XlaOp XlaBuilder::Eq(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return BinaryOp(HloOpcode::kEq, lhs, rhs, broadcast_dimensions);
}
XlaOp XlaBuilder::Ne(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return BinaryOp(HloOpcode::kNe, lhs, rhs, broadcast_dimensions);
}
XlaOp XlaBuilder::Ge(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return BinaryOp(HloOpcode::kGe, lhs, rhs, broadcast_dimensions);
}
XlaOp XlaBuilder::Gt(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return BinaryOp(HloOpcode::kGt, lhs, rhs, broadcast_dimensions);
}
XlaOp XlaBuilder::Le(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return BinaryOp(HloOpcode::kLe, lhs, rhs, broadcast_dimensions);
}
XlaOp XlaBuilder::Lt(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return BinaryOp(HloOpcode::kLt, lhs, rhs, broadcast_dimensions);
}
@@ -899,8 +897,8 @@ Status XlaBuilder::VerifyConvolution(
}
XlaOp XlaBuilder::Conv(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- Padding padding, int64 feature_group_count,
+ absl::Span<const int64> window_strides, Padding padding,
+ int64 feature_group_count,
const PrecisionConfigProto* precision_config_proto) {
return ConvWithGeneralDimensions(
lhs, rhs, window_strides, padding,
@@ -909,9 +907,8 @@ XlaOp XlaBuilder::Conv(const XlaOp& lhs, const XlaOp& rhs,
}
XlaOp XlaBuilder::ConvWithGeneralPadding(
- const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
+ const XlaOp& lhs, const XlaOp& rhs, absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding,
int64 feature_group_count,
const PrecisionConfigProto* precision_config_proto) {
return ConvGeneral(lhs, rhs, window_strides, padding,
@@ -920,9 +917,8 @@ XlaOp XlaBuilder::ConvWithGeneralPadding(
}
XlaOp XlaBuilder::ConvWithGeneralDimensions(
- const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> window_strides, Padding padding,
- const ConvolutionDimensionNumbers& dimension_numbers,
+ const XlaOp& lhs, const XlaOp& rhs, absl::Span<const int64> window_strides,
+ Padding padding, const ConvolutionDimensionNumbers& dimension_numbers,
int64 feature_group_count,
const PrecisionConfigProto* precision_config_proto) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
@@ -957,9 +953,8 @@ XlaOp XlaBuilder::ConvWithGeneralDimensions(
}
XlaOp XlaBuilder::ConvGeneral(
- const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
+ const XlaOp& lhs, const XlaOp& rhs, absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding,
const ConvolutionDimensionNumbers& dimension_numbers,
int64 feature_group_count,
const PrecisionConfigProto* precision_config_proto) {
@@ -969,11 +964,9 @@ XlaOp XlaBuilder::ConvGeneral(
}
XlaOp XlaBuilder::ConvGeneralDilated(
- const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
- tensorflow::gtl::ArraySlice<int64> lhs_dilation,
- tensorflow::gtl::ArraySlice<int64> rhs_dilation,
+ const XlaOp& lhs, const XlaOp& rhs, absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding,
+ absl::Span<const int64> lhs_dilation, absl::Span<const int64> rhs_dilation,
const ConvolutionDimensionNumbers& dimension_numbers,
int64 feature_group_count,
const PrecisionConfigProto* precision_config_proto) {
@@ -1013,11 +1006,11 @@ XlaOp XlaBuilder::ConvGeneralDilated(
}
StatusOr<Window> XlaBuilder::MakeWindow(
- tensorflow::gtl::ArraySlice<int64> window_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
- tensorflow::gtl::ArraySlice<int64> lhs_dilation,
- tensorflow::gtl::ArraySlice<int64> rhs_dilation) const {
+ absl::Span<const int64> window_dimensions,
+ absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding,
+ absl::Span<const int64> lhs_dilation,
+ absl::Span<const int64> rhs_dilation) const {
const auto verify_size = [&](const size_t x, const char* x_name) {
if (x == 0 || x == window_dimensions.size()) {
return Status::OK();
@@ -1067,7 +1060,7 @@ StatusOr<Window> XlaBuilder::MakeWindow(
}
XlaOp XlaBuilder::Fft(const XlaOp& operand, const FftType fft_type,
- const tensorflow::gtl::ArraySlice<int64> fft_length) {
+ const absl::Span<const int64> fft_length) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape& operand_shape, GetShape(operand));
@@ -1276,7 +1269,7 @@ XlaOp XlaBuilder::CreateToken() {
});
}
-XlaOp XlaBuilder::AfterAll(tensorflow::gtl::ArraySlice<XlaOp> tokens) {
+XlaOp XlaBuilder::AfterAll(absl::Span<const XlaOp> tokens) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
if (tokens.empty()) {
return InvalidArgument("AfterAll requires at least one operand");
@@ -1288,7 +1281,7 @@ XlaOp XlaBuilder::AfterAll(tensorflow::gtl::ArraySlice<XlaOp> tokens) {
}
XlaOp XlaBuilder::CustomCall(const string& call_target_name,
- tensorflow::gtl::ArraySlice<XlaOp> operands,
+ absl::Span<const XlaOp> operands,
const Shape& shape) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
@@ -1304,9 +1297,8 @@ XlaOp XlaBuilder::CustomCall(const string& call_target_name,
});
}
-XlaOp XlaBuilder::Complex(
- const XlaOp& real, const XlaOp& imag,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+XlaOp XlaBuilder::Complex(const XlaOp& real, const XlaOp& imag,
+ absl::Span<const int64> broadcast_dimensions) {
return BinaryOp(HloOpcode::kComplex, real, imag, broadcast_dimensions);
}
@@ -1315,42 +1307,42 @@ XlaOp XlaBuilder::Conj(const XlaOp& operand) {
}
XlaOp XlaBuilder::Sub(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return BinaryOp(HloOpcode::kSubtract, lhs, rhs, broadcast_dimensions);
}
XlaOp XlaBuilder::Div(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return BinaryOp(HloOpcode::kDivide, lhs, rhs, broadcast_dimensions);
}
XlaOp XlaBuilder::Rem(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return BinaryOp(HloOpcode::kRemainder, lhs, rhs, broadcast_dimensions);
}
XlaOp XlaBuilder::Max(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return BinaryOp(HloOpcode::kMaximum, lhs, rhs, broadcast_dimensions);
}
XlaOp XlaBuilder::Min(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return BinaryOp(HloOpcode::kMinimum, lhs, rhs, broadcast_dimensions);
}
XlaOp XlaBuilder::And(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return BinaryOp(HloOpcode::kAnd, lhs, rhs, broadcast_dimensions);
}
XlaOp XlaBuilder::Or(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return BinaryOp(HloOpcode::kOr, lhs, rhs, broadcast_dimensions);
}
XlaOp XlaBuilder::Xor(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return BinaryOp(HloOpcode::kXor, lhs, rhs, broadcast_dimensions);
}
@@ -1358,22 +1350,21 @@ XlaOp XlaBuilder::Not(const XlaOp& operand) {
return UnaryOp(HloOpcode::kNot, operand);
}
-XlaOp XlaBuilder::ShiftLeft(
- const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+XlaOp XlaBuilder::ShiftLeft(const XlaOp& lhs, const XlaOp& rhs,
+ absl::Span<const int64> broadcast_dimensions) {
return BinaryOp(HloOpcode::kShiftLeft, lhs, rhs, broadcast_dimensions);
}
XlaOp XlaBuilder::ShiftRightArithmetic(
const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return BinaryOp(HloOpcode::kShiftRightArithmetic, lhs, rhs,
broadcast_dimensions);
}
XlaOp XlaBuilder::ShiftRightLogical(
const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return BinaryOp(HloOpcode::kShiftRightLogical, lhs, rhs,
broadcast_dimensions);
}
@@ -1382,9 +1373,8 @@ XlaOp XlaBuilder::Abs(const XlaOp& operand) {
return UnaryOp(HloOpcode::kAbs, operand);
}
-XlaOp XlaBuilder::Atan2(
- const XlaOp& y, const XlaOp& x,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+XlaOp XlaBuilder::Atan2(const XlaOp& y, const XlaOp& x,
+ absl::Span<const int64> broadcast_dimensions) {
return BinaryOp(HloOpcode::kAtan2, y, x, broadcast_dimensions);
}
@@ -1449,7 +1439,7 @@ XlaOp XlaBuilder::IsFinite(const XlaOp& operand) {
}
XlaOp XlaBuilder::Transpose(const XlaOp& operand,
- tensorflow::gtl::ArraySlice<int64> permutation) {
+ absl::Span<const int64> permutation) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape& operand_shape, GetShape(operand));
@@ -1464,7 +1454,7 @@ XlaOp XlaBuilder::Transpose(const XlaOp& operand,
}
XlaOp XlaBuilder::Rev(const XlaOp& operand,
- tensorflow::gtl::ArraySlice<int64> dimensions) {
+ absl::Span<const int64> dimensions) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape& operand_shape, GetShape(operand));
@@ -1506,7 +1496,7 @@ XlaOp XlaBuilder::Sort(XlaOp keys, absl::optional<XlaOp> values,
}
XlaOp XlaBuilder::Pow(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return BinaryOp(HloOpcode::kPower, lhs, rhs, broadcast_dimensions);
}
@@ -1544,10 +1534,10 @@ XlaOp XlaBuilder::Clamp(const XlaOp& min, const XlaOp& operand,
return TernaryOp(HloOpcode::kClamp, min, operand, max);
}
-XlaOp XlaBuilder::Map(tensorflow::gtl::ArraySlice<XlaOp> operands,
+XlaOp XlaBuilder::Map(absl::Span<const XlaOp> operands,
const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<int64> dimensions,
- tensorflow::gtl::ArraySlice<XlaOp> static_operands) {
+ absl::Span<const int64> dimensions,
+ absl::Span<const XlaOp> static_operands) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
if (!static_operands.empty()) {
return Unimplemented("static_operands is not supported in Map");
@@ -1588,7 +1578,7 @@ XlaOp XlaBuilder::Map(tensorflow::gtl::ArraySlice<XlaOp> operands,
}
XlaOp XlaBuilder::RngOp(RandomDistribution distribution,
- tensorflow::gtl::ArraySlice<XlaOp> parameters,
+ absl::Span<const XlaOp> parameters,
const Shape& shape) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
@@ -1649,7 +1639,7 @@ XlaOp XlaBuilder::While(const XlaComputation& condition,
XlaOp XlaBuilder::Gather(const XlaOp& input, const XlaOp& start_indices,
const GatherDimensionNumbers& dimension_numbers,
- tensorflow::gtl::ArraySlice<int64> slice_sizes) {
+ absl::Span<const int64> slice_sizes) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
@@ -1729,20 +1719,18 @@ XlaOp XlaBuilder::Conditional(const XlaOp& predicate, const XlaOp& true_operand,
});
}
-XlaOp XlaBuilder::Reduce(
- const XlaOp& operand, const XlaOp& init_value,
- const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<int64> dimensions_to_reduce) {
- return Reduce(tensorflow::gtl::ArraySlice<XlaOp>({operand}),
- tensorflow::gtl::ArraySlice<XlaOp>({init_value}), computation,
+XlaOp XlaBuilder::Reduce(const XlaOp& operand, const XlaOp& init_value,
+ const XlaComputation& computation,
+ absl::Span<const int64> dimensions_to_reduce) {
+ return Reduce(absl::Span<const XlaOp>({operand}),
+ absl::Span<const XlaOp>({init_value}), computation,
dimensions_to_reduce);
}
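// Note (illustrative, not part of this change): the braced lists above
// materialize temporary arrays, and absl::Span, like ArraySlice before it,
// only points at them. The spans are consumed within the same full
// expression (the forwarding call), so this stays safe; storing such a span
// and reading it after the statement ends would dangle.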
-XlaOp XlaBuilder::Reduce(
- tensorflow::gtl::ArraySlice<XlaOp> operands,
- tensorflow::gtl::ArraySlice<XlaOp> init_values,
- const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<int64> dimensions_to_reduce) {
+XlaOp XlaBuilder::Reduce(absl::Span<const XlaOp> operands,
+ absl::Span<const XlaOp> init_values,
+ const XlaComputation& computation,
+ absl::Span<const int64> dimensions_to_reduce) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
@@ -1785,11 +1773,11 @@ XlaOp XlaBuilder::ReduceAll(const XlaOp& operand, const XlaOp& init_value,
});
}
-XlaOp XlaBuilder::ReduceWindow(
- const XlaOp& operand, const XlaOp& init_value,
- const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<int64> window_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_strides, Padding padding) {
+XlaOp XlaBuilder::ReduceWindow(const XlaOp& operand, const XlaOp& init_value,
+ const XlaComputation& computation,
+ absl::Span<const int64> window_dimensions,
+ absl::Span<const int64> window_strides,
+ Padding padding) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
@@ -1810,9 +1798,9 @@ XlaOp XlaBuilder::ReduceWindow(
XlaOp XlaBuilder::ReduceWindowWithGeneralPadding(
const XlaOp& operand, const XlaOp& init_value,
const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<int64> window_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding) {
+ absl::Span<const int64> window_dimensions,
+ absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
@@ -1907,8 +1895,7 @@ XlaOp XlaBuilder::BatchNormGrad(const XlaOp& operand, const XlaOp& scale,
}
XlaOp XlaBuilder::CrossReplicaSum(
- const XlaOp& operand,
- tensorflow::gtl::ArraySlice<ReplicaGroup> replica_groups) {
+ const XlaOp& operand, absl::Span<const ReplicaGroup> replica_groups) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape& shape, GetShape(operand));
const Shape& scalar_shape = ShapeUtil::MakeShape(shape.element_type(), {});
@@ -1923,7 +1910,7 @@ XlaOp XlaBuilder::CrossReplicaSum(
XlaOp XlaBuilder::CrossReplicaSum(
const XlaOp& operand, const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<ReplicaGroup> replica_groups,
+ absl::Span<const ReplicaGroup> replica_groups,
const absl::optional<ChannelHandle>& channel_id) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
@@ -2023,12 +2010,13 @@ XlaOp XlaBuilder::CollectivePermute(
});
}
-XlaOp XlaBuilder::SelectAndScatter(
- const XlaOp& operand, const XlaComputation& select,
- tensorflow::gtl::ArraySlice<int64> window_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_strides, Padding padding,
- const XlaOp& source, const XlaOp& init_value,
- const XlaComputation& scatter) {
+XlaOp XlaBuilder::SelectAndScatter(const XlaOp& operand,
+ const XlaComputation& select,
+ absl::Span<const int64> window_dimensions,
+ absl::Span<const int64> window_strides,
+ Padding padding, const XlaOp& source,
+ const XlaOp& init_value,
+ const XlaComputation& scatter) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(const Shape& operand_shape, GetShape(operand));
return SelectAndScatterWithGeneralPadding(
@@ -2041,11 +2029,10 @@ XlaOp XlaBuilder::SelectAndScatter(
XlaOp XlaBuilder::SelectAndScatterWithGeneralPadding(
const XlaOp& operand, const XlaComputation& select,
- tensorflow::gtl::ArraySlice<int64> window_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
- const XlaOp& source, const XlaOp& init_value,
- const XlaComputation& scatter) {
+ absl::Span<const int64> window_dimensions,
+ absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding, const XlaOp& source,
+ const XlaOp& init_value, const XlaComputation& scatter) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
HloInstructionProto instr;
@@ -2428,9 +2415,9 @@ XlaBuilder::CreateDefaultConvDimensionNumbers(int num_spatial_dims) {
return Status::OK();
}
-StatusOr<XlaOp> XlaBuilder::AddInstruction(
- HloInstructionProto&& instr, HloOpcode opcode,
- tensorflow::gtl::ArraySlice<XlaOp> operands) {
+StatusOr<XlaOp> XlaBuilder::AddInstruction(HloInstructionProto&& instr,
+ HloOpcode opcode,
+ absl::Span<const XlaOp> operands) {
TF_RETURN_IF_ERROR(first_error_);
const int64 handle = instructions_.size();
@@ -2504,14 +2491,12 @@ XlaOp ConstantLiteral(XlaBuilder* builder, const LiteralSlice& literal) {
return builder->ConstantLiteral(literal);
}
-XlaOp Broadcast(const XlaOp& operand,
- tensorflow::gtl::ArraySlice<int64> broadcast_sizes) {
+XlaOp Broadcast(const XlaOp& operand, absl::Span<const int64> broadcast_sizes) {
return operand.builder()->Broadcast(operand, broadcast_sizes);
}
-XlaOp BroadcastInDim(
- const XlaOp& operand, const Shape& shape,
- const tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+XlaOp BroadcastInDim(const XlaOp& operand, const Shape& shape,
+ const absl::Span<const int64> broadcast_dimensions) {
return operand.builder()->BroadcastInDim(operand, shape,
broadcast_dimensions);
}
@@ -2521,26 +2506,22 @@ XlaOp Pad(const XlaOp& operand, const XlaOp& padding_value,
return operand.builder()->Pad(operand, padding_value, padding_config);
}
-XlaOp Reshape(const XlaOp& operand,
- tensorflow::gtl::ArraySlice<int64> dimensions,
- tensorflow::gtl::ArraySlice<int64> new_sizes) {
+XlaOp Reshape(const XlaOp& operand, absl::Span<const int64> dimensions,
+ absl::Span<const int64> new_sizes) {
return operand.builder()->Reshape(operand, dimensions, new_sizes);
}
-XlaOp Reshape(const XlaOp& operand,
- tensorflow::gtl::ArraySlice<int64> new_sizes) {
+XlaOp Reshape(const XlaOp& operand, absl::Span<const int64> new_sizes) {
return operand.builder()->Reshape(operand, new_sizes);
}
-XlaOp Collapse(const XlaOp& operand,
- tensorflow::gtl::ArraySlice<int64> dimensions) {
+XlaOp Collapse(const XlaOp& operand, absl::Span<const int64> dimensions) {
return operand.builder()->Collapse(operand, dimensions);
}
-XlaOp Slice(const XlaOp& operand,
- tensorflow::gtl::ArraySlice<int64> start_indices,
- tensorflow::gtl::ArraySlice<int64> limit_indices,
- tensorflow::gtl::ArraySlice<int64> strides) {
+XlaOp Slice(const XlaOp& operand, absl::Span<const int64> start_indices,
+ absl::Span<const int64> limit_indices,
+ absl::Span<const int64> strides) {
return operand.builder()->Slice(operand, start_indices, limit_indices,
strides);
}
@@ -2552,7 +2533,7 @@ XlaOp SliceInDim(const XlaOp& operand, int64 start_index, int64 limit_index,
}
XlaOp DynamicSlice(const XlaOp& operand, const XlaOp& start_indices,
- tensorflow::gtl::ArraySlice<int64> slice_sizes) {
+ absl::Span<const int64> slice_sizes) {
return operand.builder()->DynamicSlice(operand, start_indices, slice_sizes);
}
@@ -2561,8 +2542,7 @@ XlaOp DynamicUpdateSlice(const XlaOp& operand, const XlaOp& update,
return operand.builder()->DynamicUpdateSlice(operand, update, start_indices);
}
-XlaOp ConcatInDim(XlaBuilder* builder,
- tensorflow::gtl::ArraySlice<XlaOp> operands,
+XlaOp ConcatInDim(XlaBuilder* builder, absl::Span<const XlaOp> operands,
int64 dimension) {
return builder->ConcatInDim(operands, dimension);
}
@@ -2575,7 +2555,7 @@ XlaOp Select(const XlaOp& pred, const XlaOp& on_true, const XlaOp& on_false) {
return pred.builder()->Select(pred, on_true, on_false);
}
-XlaOp Tuple(XlaBuilder* builder, tensorflow::gtl::ArraySlice<XlaOp> elements) {
+XlaOp Tuple(XlaBuilder* builder, absl::Span<const XlaOp> elements) {
return builder->Tuple(elements);
}
@@ -2584,32 +2564,32 @@ XlaOp GetTupleElement(const XlaOp& tuple_data, int64 index) {
}
XlaOp Eq(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return lhs.builder()->Eq(lhs, rhs, broadcast_dimensions);
}
XlaOp Ne(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return lhs.builder()->Ne(lhs, rhs, broadcast_dimensions);
}
XlaOp Ge(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return lhs.builder()->Ge(lhs, rhs, broadcast_dimensions);
}
XlaOp Gt(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return lhs.builder()->Gt(lhs, rhs, broadcast_dimensions);
}
XlaOp Lt(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return lhs.builder()->Lt(lhs, rhs, broadcast_dimensions);
}
XlaOp Le(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return lhs.builder()->Le(lhs, rhs, broadcast_dimensions);
}
@@ -2626,7 +2606,7 @@ XlaOp DotGeneral(const XlaOp& lhs, const XlaOp& rhs,
}
XlaOp Conv(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> window_strides, Padding padding,
+ absl::Span<const int64> window_strides, Padding padding,
int64 feature_group_count,
const PrecisionConfigProto* precision_config_proto) {
return lhs.builder()->Conv(lhs, rhs, window_strides, padding,
@@ -2634,9 +2614,8 @@ XlaOp Conv(const XlaOp& lhs, const XlaOp& rhs,
}
XlaOp ConvWithGeneralPadding(
- const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
+ const XlaOp& lhs, const XlaOp& rhs, absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding,
int64 feature_group_count,
const PrecisionConfigProto* precision_config_proto) {
return lhs.builder()->ConvWithGeneralPadding(lhs, rhs, window_strides,
@@ -2645,9 +2624,8 @@ XlaOp ConvWithGeneralPadding(
}
XlaOp ConvWithGeneralDimensions(
- const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> window_strides, Padding padding,
- const ConvolutionDimensionNumbers& dimension_numbers,
+ const XlaOp& lhs, const XlaOp& rhs, absl::Span<const int64> window_strides,
+ Padding padding, const ConvolutionDimensionNumbers& dimension_numbers,
int64 feature_group_count,
const PrecisionConfigProto* precision_config_proto) {
return lhs.builder()->ConvWithGeneralDimensions(
@@ -2656,8 +2634,8 @@ XlaOp ConvWithGeneralDimensions(
}
XlaOp ConvGeneral(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
+ absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding,
const ConvolutionDimensionNumbers& dimension_numbers,
int64 feature_group_count,
const PrecisionConfigProto* precision_config_proto) {
@@ -2666,22 +2644,21 @@ XlaOp ConvGeneral(const XlaOp& lhs, const XlaOp& rhs,
precision_config_proto);
}
-XlaOp ConvGeneralDilated(
- const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
- tensorflow::gtl::ArraySlice<int64> lhs_dilation,
- tensorflow::gtl::ArraySlice<int64> rhs_dilation,
- const ConvolutionDimensionNumbers& dimension_numbers,
- int64 feature_group_count,
- const PrecisionConfigProto* precision_config_proto) {
+XlaOp ConvGeneralDilated(const XlaOp& lhs, const XlaOp& rhs,
+ absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding,
+ absl::Span<const int64> lhs_dilation,
+ absl::Span<const int64> rhs_dilation,
+ const ConvolutionDimensionNumbers& dimension_numbers,
+ int64 feature_group_count,
+ const PrecisionConfigProto* precision_config_proto) {
return lhs.builder()->ConvGeneralDilated(
lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count, precision_config_proto);
}
XlaOp Fft(const XlaOp& operand, FftType fft_type,
- tensorflow::gtl::ArraySlice<int64> fft_length) {
+ absl::Span<const int64> fft_length) {
return operand.builder()->Fft(operand, fft_type, fft_length);
}
@@ -2695,105 +2672,102 @@ void Outfeed(const XlaOp& operand, const Shape& shape_with_layout,
}
XlaOp Call(XlaBuilder* builder, const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<XlaOp> operands) {
+ absl::Span<const XlaOp> operands) {
return builder->Call(computation, operands);
}
XlaOp CustomCall(XlaBuilder* builder, const string& call_target_name,
- tensorflow::gtl::ArraySlice<XlaOp> operands,
- const Shape& shape) {
+ absl::Span<const XlaOp> operands, const Shape& shape) {
return builder->CustomCall(call_target_name, operands, shape);
}
XlaOp Complex(const XlaOp& real, const XlaOp& imag,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return real.builder()->Complex(real, imag, broadcast_dimensions);
}
XlaOp Conj(const XlaOp& operand) { return operand.builder()->Conj(operand); }
XlaOp Add(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return lhs.builder()->Add(lhs, rhs, broadcast_dimensions);
}
XlaOp Sub(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return lhs.builder()->Sub(lhs, rhs, broadcast_dimensions);
}
XlaOp Mul(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return lhs.builder()->Mul(lhs, rhs, broadcast_dimensions);
}
XlaOp Div(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return lhs.builder()->Div(lhs, rhs, broadcast_dimensions);
}
XlaOp Rem(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return lhs.builder()->Rem(lhs, rhs, broadcast_dimensions);
}
XlaOp Max(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return lhs.builder()->Max(lhs, rhs, broadcast_dimensions);
}
XlaOp Min(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return lhs.builder()->Min(lhs, rhs, broadcast_dimensions);
}
XlaOp And(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return lhs.builder()->And(lhs, rhs, broadcast_dimensions);
}
XlaOp Or(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return lhs.builder()->Or(lhs, rhs, broadcast_dimensions);
}
XlaOp Xor(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return lhs.builder()->Xor(lhs, rhs, broadcast_dimensions);
}
XlaOp Not(const XlaOp& operand) { return operand.builder()->Not(operand); }
XlaOp ShiftLeft(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return lhs.builder()->ShiftLeft(lhs, rhs, broadcast_dimensions);
}
-XlaOp ShiftRightArithmetic(
- const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+XlaOp ShiftRightArithmetic(const XlaOp& lhs, const XlaOp& rhs,
+ absl::Span<const int64> broadcast_dimensions) {
return lhs.builder()->ShiftRightArithmetic(lhs, rhs, broadcast_dimensions);
}
-XlaOp ShiftRightLogical(
- const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+XlaOp ShiftRightLogical(const XlaOp& lhs, const XlaOp& rhs,
+ absl::Span<const int64> broadcast_dimensions) {
return lhs.builder()->ShiftRightLogical(lhs, rhs, broadcast_dimensions);
}
XlaOp Reduce(const XlaOp& operand, const XlaOp& init_value,
const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<int64> dimensions_to_reduce) {
+ absl::Span<const int64> dimensions_to_reduce) {
return operand.builder()->Reduce(operand, init_value, computation,
dimensions_to_reduce);
}
// Reduces several arrays simultaneously among the provided dimensions, given
// "computation" as a reduction operator.
-XlaOp Reduce(XlaBuilder* builder, tensorflow::gtl::ArraySlice<XlaOp> operands,
- tensorflow::gtl::ArraySlice<XlaOp> init_values,
+XlaOp Reduce(XlaBuilder* builder, absl::Span<const XlaOp> operands,
+ absl::Span<const XlaOp> init_values,
const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<int64> dimensions_to_reduce) {
+ absl::Span<const int64> dimensions_to_reduce) {
return builder->Reduce(operands, init_values, computation,
dimensions_to_reduce);
}
@@ -2805,9 +2779,8 @@ XlaOp ReduceAll(const XlaOp& operand, const XlaOp& init_value,
XlaOp ReduceWindow(const XlaOp& operand, const XlaOp& init_value,
const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<int64> window_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- Padding padding) {
+ absl::Span<const int64> window_dimensions,
+ absl::Span<const int64> window_strides, Padding padding) {
return operand.builder()->ReduceWindow(operand, init_value, computation,
window_dimensions, window_strides,
padding);
@@ -2816,22 +2789,21 @@ XlaOp ReduceWindow(const XlaOp& operand, const XlaOp& init_value,
XlaOp ReduceWindowWithGeneralPadding(
const XlaOp& operand, const XlaOp& init_value,
const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<int64> window_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding) {
+ absl::Span<const int64> window_dimensions,
+ absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding) {
return operand.builder()->ReduceWindowWithGeneralPadding(
operand, init_value, computation, window_dimensions, window_strides,
padding);
}
-XlaOp CrossReplicaSum(
- const XlaOp& operand,
- tensorflow::gtl::ArraySlice<ReplicaGroup> replica_groups) {
+XlaOp CrossReplicaSum(const XlaOp& operand,
+ absl::Span<const ReplicaGroup> replica_groups) {
return operand.builder()->CrossReplicaSum(operand, replica_groups);
}
XlaOp CrossReplicaSum(const XlaOp& operand, const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<ReplicaGroup> replica_groups,
+ absl::Span<const ReplicaGroup> replica_groups,
const absl::optional<ChannelHandle>& channel_id) {
return operand.builder()->CrossReplicaSum(operand, computation,
replica_groups, channel_id);
@@ -2851,10 +2823,10 @@ XlaOp CollectivePermute(
}
XlaOp SelectAndScatter(const XlaOp& operand, const XlaComputation& select,
- tensorflow::gtl::ArraySlice<int64> window_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- Padding padding, const XlaOp& source,
- const XlaOp& init_value, const XlaComputation& scatter) {
+ absl::Span<const int64> window_dimensions,
+ absl::Span<const int64> window_strides, Padding padding,
+ const XlaOp& source, const XlaOp& init_value,
+ const XlaComputation& scatter) {
return operand.builder()->SelectAndScatter(operand, select, window_dimensions,
window_strides, padding, source,
init_value, scatter);
@@ -2862,11 +2834,10 @@ XlaOp SelectAndScatter(const XlaOp& operand, const XlaComputation& select,
XlaOp SelectAndScatterWithGeneralPadding(
const XlaOp& operand, const XlaComputation& select,
- tensorflow::gtl::ArraySlice<int64> window_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
- const XlaOp& source, const XlaOp& init_value,
- const XlaComputation& scatter) {
+ absl::Span<const int64> window_dimensions,
+ absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding, const XlaOp& source,
+ const XlaOp& init_value, const XlaComputation& scatter) {
return operand.builder()->SelectAndScatterWithGeneralPadding(
operand, select, window_dimensions, window_strides, padding, source,
init_value, scatter);
@@ -2875,7 +2846,7 @@ XlaOp SelectAndScatterWithGeneralPadding(
XlaOp Abs(const XlaOp& operand) { return operand.builder()->Abs(operand); }
XlaOp Atan2(const XlaOp& y, const XlaOp& x,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return y.builder()->Atan2(y, x, broadcast_dimensions);
}
@@ -2908,7 +2879,7 @@ XlaOp Real(const XlaOp& operand) { return operand.builder()->Real(operand); }
XlaOp Imag(const XlaOp& operand) { return operand.builder()->Imag(operand); }
XlaOp Pow(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return lhs.builder()->Pow(lhs, rhs, broadcast_dimensions);
}
@@ -2926,12 +2897,11 @@ XlaOp BitcastConvertType(const XlaOp& operand, PrimitiveType new_element_type) {
XlaOp Neg(const XlaOp& operand) { return operand.builder()->Neg(operand); }
-XlaOp Transpose(const XlaOp& operand,
- tensorflow::gtl::ArraySlice<int64> permutation) {
+XlaOp Transpose(const XlaOp& operand, absl::Span<const int64> permutation) {
return operand.builder()->Transpose(operand, permutation);
}
-XlaOp Rev(const XlaOp& operand, tensorflow::gtl::ArraySlice<int64> dimensions) {
+XlaOp Rev(const XlaOp& operand, absl::Span<const int64> dimensions) {
return operand.builder()->Rev(operand, dimensions);
}
@@ -2943,10 +2913,9 @@ XlaOp Clamp(const XlaOp& min, const XlaOp& operand, const XlaOp& max) {
return min.builder()->Clamp(min, operand, max);
}
-XlaOp Map(XlaBuilder* builder, tensorflow::gtl::ArraySlice<XlaOp> operands,
- const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<int64> dimensions,
- tensorflow::gtl::ArraySlice<XlaOp> static_operands) {
+XlaOp Map(XlaBuilder* builder, absl::Span<const XlaOp> operands,
+ const XlaComputation& computation, absl::Span<const int64> dimensions,
+ absl::Span<const XlaOp> static_operands) {
return builder->Map(operands, computation, dimensions, static_operands);
}
@@ -2980,7 +2949,7 @@ XlaOp ReducePrecision(const XlaOp& operand, const int exponent_bits,
XlaOp Gather(const XlaOp& input, const XlaOp& start_indices,
const GatherDimensionNumbers& dimension_numbers,
- tensorflow::gtl::ArraySlice<int64> slice_sizes) {
+ absl::Span<const int64> slice_sizes) {
return input.builder()->Gather(input, start_indices, dimension_numbers,
slice_sizes);
}
@@ -3036,7 +3005,7 @@ XlaOp OutfeedWithToken(const XlaOp& operand, const XlaOp& token,
XlaOp CreateToken(XlaBuilder* builder) { return builder->CreateToken(); }
-XlaOp AfterAll(XlaBuilder* builder, tensorflow::gtl::ArraySlice<XlaOp> tokens) {
+XlaOp AfterAll(XlaBuilder* builder, absl::Span<const XlaOp> tokens) {
return builder->AfterAll(tokens);
}
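For call sites, the rename is intended to be a drop-in change: absl::Span<const T>,
like the old ArraySlice, converts implicitly from std::vector<T>, std::array,
plain C arrays, and braced initializer lists. A minimal sketch of a hypothetical
caller (BuildExample is not part of this change), assuming the builder API shown
in this diff and the xla::int64 alias from xla/types.h:

  #include <vector>

  #include "absl/types/span.h"
  #include "tensorflow/compiler/xla/client/xla_builder.h"
  #include "tensorflow/compiler/xla/shape_util.h"

  void BuildExample(xla::XlaBuilder* b) {
    xla::XlaOp x = xla::Parameter(
        b, 0, xla::ShapeUtil::MakeShape(xla::F32, {4, 8}), "x");
    // Braced lists still bind to the Span parameters, exactly as with ArraySlice.
    xla::XlaOp s = xla::Slice(x, /*start_indices=*/{0, 0},
                              /*limit_indices=*/{2, 8}, /*strides=*/{1, 1});
    // std::vector arguments convert implicitly as well.
    std::vector<xla::int64> dims = {1};
    xla::XlaOp r = xla::Rev(s, dims);
    (void)r;
  }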
diff --git a/tensorflow/compiler/xla/client/xla_builder.h b/tensorflow/compiler/xla/client/xla_builder.h
index b9e651f2ae..f83fef0cf2 100644
--- a/tensorflow/compiler/xla/client/xla_builder.h
+++ b/tensorflow/compiler/xla/client/xla_builder.h
@@ -294,7 +294,7 @@ class XlaBuilder {
template <typename NativeT>
XlaOp ConstantR0(NativeT value);
template <typename NativeT>
- XlaOp ConstantR1(tensorflow::gtl::ArraySlice<NativeT> values);
+ XlaOp ConstantR1(absl::Span<const NativeT> values);
XlaOp ConstantR1(const tensorflow::core::Bitmap& values);
template <typename NativeT>
XlaOp ConstantR2(
@@ -336,7 +336,7 @@ class XlaBuilder {
//
// output[i0, ..., iN, j0, ..., jM] = operand[j0, ..., jM]
XlaOp Broadcast(const XlaOp& operand,
- tensorflow::gtl::ArraySlice<int64> broadcast_sizes);
+ absl::Span<const int64> broadcast_sizes);
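// Illustrative sketch (not part of this change): for an operand of shape
// f32[2], Broadcast(operand, /*broadcast_sizes=*/{3}) produces shape
// f32[3, 2]; the prepended dimension is iterated per the formula above.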
// Performs in-dimension-style broadcast.
//
@@ -355,9 +355,8 @@ class XlaBuilder {
// will generate output
// [1 , 1]
// [2 , 2]
- XlaOp BroadcastInDim(
- const XlaOp& operand, const Shape& shape,
- const tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ XlaOp BroadcastInDim(const XlaOp& operand, const Shape& shape,
+ const absl::Span<const int64> broadcast_dimensions);
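// Illustrative sketch (not part of this change), matching the example above:
// for an operand of shape f32[2] holding {1, 2},
//   BroadcastInDim(operand, ShapeUtil::MakeShape(F32, {2, 2}),
//                  /*broadcast_dimensions=*/{0})
// maps operand dimension 0 onto output dimension 0 and replicates along
// output dimension 1, giving [[1, 1], [2, 2]].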
// Enqueues a pad operation onto the computation that pads the given value on
// the edges as well as between the elements of the input. padding_config
@@ -370,15 +369,13 @@ class XlaBuilder {
// given, followed by reshaping it into the shape with the given dimension
// sizes (also major to minor). Conceptually, this is a limited form of
// "shape casting".
- XlaOp Reshape(const XlaOp& operand,
- tensorflow::gtl::ArraySlice<int64> dimensions,
- tensorflow::gtl::ArraySlice<int64> new_sizes);
+ XlaOp Reshape(const XlaOp& operand, absl::Span<const int64> dimensions,
+ absl::Span<const int64> new_sizes);
// Enqueues an operation onto the computation that collapses the operand, from
// first to last dimension (C order), then reshapes it to the given dimension
// sizes. Conceptually, this is a limited form of "shape casting".
- XlaOp Reshape(const XlaOp& operand,
- tensorflow::gtl::ArraySlice<int64> new_sizes);
+ XlaOp Reshape(const XlaOp& operand, absl::Span<const int64> new_sizes);
// Wrapper for Reshape.
// Enqueues an operation to collapse the provided dimensions; e.g. an
@@ -398,8 +395,7 @@ class XlaBuilder {
//
// This could potentially cause data to be moved -- it provides a more
// structured form of reshaping than an arbitrary Reshape operation.
- XlaOp Collapse(const XlaOp& operand,
- tensorflow::gtl::ArraySlice<int64> dimensions);
+ XlaOp Collapse(const XlaOp& operand, absl::Span<const int64> dimensions);
// Enqueues a slice operation onto the computation that slices the operand
// from the start indices to the limit indices; e.g.
@@ -412,10 +408,9 @@ class XlaBuilder {
// Note that "limit" means up-to-but-not-including; i.e. [start, limit) in 1D
// range notation.
// The strides parameter determines the stride over the slice
- XlaOp Slice(const XlaOp& operand,
- tensorflow::gtl::ArraySlice<int64> start_indices,
- tensorflow::gtl::ArraySlice<int64> limit_indices,
- tensorflow::gtl::ArraySlice<int64> strides);
+ XlaOp Slice(const XlaOp& operand, absl::Span<const int64> start_indices,
+ absl::Span<const int64> limit_indices,
+ absl::Span<const int64> strides);
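// Illustrative sketch (not part of this change): for an operand of shape
// f32[6], Slice(operand, /*start_indices=*/{1}, /*limit_indices=*/{5},
// /*strides=*/{2}) selects indices 1 and 3, producing shape f32[2].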
// Enqueues a slice operation in a given dimension, taking all other
// dimensions as they are; e.g. if dimno is 1 from start_index 2 to
@@ -436,7 +431,7 @@ class XlaBuilder {
// Slice index calculations are computed modulo input dimension sizes to
// prevent dynamic start indices from generating out-of-bound array accesses.
XlaOp DynamicSlice(const XlaOp& operand, const XlaOp& start_indices,
- tensorflow::gtl::ArraySlice<int64> slice_sizes);
+ absl::Span<const int64> slice_sizes);
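// Illustrative sketch (not part of this change): for a rank-2 operand,
//   DynamicSlice(operand, start_indices, /*slice_sizes=*/{1, 4})
// extracts a 1x4 window whose origin is read from the start_indices operand
// at run time, with out-of-range starts handled as described above.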
// Enqueues a dynamic update slice operation onto the computation, which
// updates a slice of 'operand' with 'update' at dynamic 'start_indices'.
@@ -459,8 +454,7 @@ class XlaBuilder {
// Enqueues a concatenate instruction onto the computation. 'operands' must
// have >= 1 entry.
- XlaOp ConcatInDim(tensorflow::gtl::ArraySlice<XlaOp> operands,
- int64 dimension);
+ XlaOp ConcatInDim(absl::Span<const XlaOp> operands, int64 dimension);
// Enqueues a tracing operation onto the computation; the computation will emit
// a logging message with the operand.
@@ -471,34 +465,34 @@ class XlaBuilder {
XlaOp Select(const XlaOp& pred, const XlaOp& on_true, const XlaOp& on_false);
// Enqueues a tuple-creation instruction onto the computation.
- XlaOp Tuple(tensorflow::gtl::ArraySlice<XlaOp> elements);
+ XlaOp Tuple(absl::Span<const XlaOp> elements);
// Enqueues a tuple-element-get instruction onto the computation.
XlaOp GetTupleElement(const XlaOp& tuple_data, int64 index);
// Enqueues an equal-to comparison instruction onto the computation.
XlaOp Eq(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
// Enqueues a not-equal comparison instruction onto the computation.
XlaOp Ne(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
// Enqueues a greater-or-equal comparison instruction onto the computation.
XlaOp Ge(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
// Enqueues a greater-than comparison instruction onto the computation.
XlaOp Gt(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
// Enqueues a less-than comparison instruction onto the computation.
XlaOp Lt(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
// Enqueues a less-or-equal comparison instruction onto the computation.
XlaOp Le(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
// Enqueues a dot instruction onto the computation.
XlaOp Dot(const XlaOp& lhs, const XlaOp& rhs,
@@ -513,7 +507,7 @@ class XlaBuilder {
// Enqueues a convolution instruction onto the computation, which uses the
// default convolution dimension numbers.
XlaOp Conv(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> window_strides, Padding padding,
+ absl::Span<const int64> window_strides, Padding padding,
int64 feature_group_count = 1,
const PrecisionConfigProto* precision_config_proto = nullptr);
@@ -521,8 +515,8 @@ class XlaBuilder {
// provided padding configuration in the format returned by MakePadding().
XlaOp ConvWithGeneralPadding(
const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
+ absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding,
int64 feature_group_count = 1,
const PrecisionConfigProto* precision_config_proto = nullptr);
@@ -530,7 +524,7 @@ class XlaBuilder {
// provided dimension numbers configuration.
XlaOp ConvWithGeneralDimensions(
const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> window_strides, Padding padding,
+ absl::Span<const int64> window_strides, Padding padding,
const ConvolutionDimensionNumbers& dimension_numbers,
int64 feature_group_count = 1,
const PrecisionConfigProto* precision_config_proto = nullptr);
@@ -539,8 +533,8 @@ class XlaBuilder {
// provided padding configuration as well as the dimension numbers.
XlaOp ConvGeneral(
const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
+ absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding,
const ConvolutionDimensionNumbers& dimension_numbers,
int64 feature_group_count = 1,
const PrecisionConfigProto* precision_config_proto = nullptr);
@@ -549,10 +543,10 @@ class XlaBuilder {
// provided padding configuration, dilation factors and dimension numbers.
XlaOp ConvGeneralDilated(
const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
- tensorflow::gtl::ArraySlice<int64> lhs_dilation,
- tensorflow::gtl::ArraySlice<int64> rhs_dilation,
+ absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding,
+ absl::Span<const int64> lhs_dilation,
+ absl::Span<const int64> rhs_dilation,
const ConvolutionDimensionNumbers& dimension_numbers,
int64 feature_group_count = 1,
const PrecisionConfigProto* precision_config_proto = nullptr);
@@ -560,7 +554,7 @@ class XlaBuilder {
// Enqueues an FFT instruction onto the computation, of the given type and
// with the given FFT length.
XlaOp Fft(const XlaOp& operand, FftType fft_type,
- tensorflow::gtl::ArraySlice<int64> fft_length);
+ absl::Span<const int64> fft_length);
// Enqueues an infeed instruction onto the computation, which writes data of
// the given shape to the infeed buffer of the device.
@@ -582,15 +576,14 @@ class XlaBuilder {
// Enqueues a call instruction onto the computation.
XlaOp Call(const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<XlaOp> operands);
+ absl::Span<const XlaOp> operands);
// Enqueues a custom call instruction onto the computation.
// During code generation, a call instruction is emitted which targets a
// symbol with the name |call_target_name|. The |operands| are passed to the
// call instruction. |shape| is the resultant shape.
XlaOp CustomCall(const string& call_target_name,
- tensorflow::gtl::ArraySlice<XlaOp> operands,
- const Shape& shape);
+ absl::Span<const XlaOp> operands, const Shape& shape);
// The following methods enqueue element-wise binary arithmetic operations
// onto the computation. The shapes of the operands have to match unless one
@@ -599,72 +592,70 @@ class XlaBuilder {
// Enqueues a complex compose instruction onto the computation.
XlaOp Complex(const XlaOp& real, const XlaOp& imag,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
// Enqueues a complex conjugate instruction onto the computation.
XlaOp Conj(const XlaOp& operand);
// Enqueues an add instruction onto the computation.
XlaOp Add(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
// Enqueues a subtract instruction onto the computation.
XlaOp Sub(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
// Enqueues a multiply instruction onto the computation.
XlaOp Mul(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
// Enqueues a divide instruction onto the computation.
XlaOp Div(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
// Enqueues a remainder instruction onto the computation.
XlaOp Rem(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
// Enqueues a max instruction onto the computation.
XlaOp Max(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
// Enqueues a min instruction onto the computation.
XlaOp Min(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
// Element-wise logical operators
XlaOp And(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
XlaOp Or(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
XlaOp Xor(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
XlaOp Not(const XlaOp& operand);
XlaOp ShiftLeft(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
- XlaOp ShiftRightArithmetic(
- const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
- XlaOp ShiftRightLogical(
- const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
+ XlaOp ShiftRightArithmetic(const XlaOp& lhs, const XlaOp& rhs,
+ absl::Span<const int64> broadcast_dimensions = {});
+ XlaOp ShiftRightLogical(const XlaOp& lhs, const XlaOp& rhs,
+ absl::Span<const int64> broadcast_dimensions = {});
// Reduces an array among the provided dimensions, given "computation" as a
// reduction operator.
XlaOp Reduce(const XlaOp& operand, const XlaOp& init_value,
const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<int64> dimensions_to_reduce);
+ absl::Span<const int64> dimensions_to_reduce);
// Reduces several arrays simultaneously among the provided dimensions, given
// "computation" as a reduction operator.
- XlaOp Reduce(tensorflow::gtl::ArraySlice<XlaOp> operands,
- tensorflow::gtl::ArraySlice<XlaOp> init_values,
+ XlaOp Reduce(absl::Span<const XlaOp> operands,
+ absl::Span<const XlaOp> init_values,
const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<int64> dimensions_to_reduce);
+ absl::Span<const int64> dimensions_to_reduce);
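// Illustrative sketch (not part of this change): with `add` an XlaComputation
// computing scalar f32 addition,
//   Reduce(operand, ConstantR0<float>(0.0f), add,
//          /*dimensions_to_reduce=*/{0})
// sums a f32[3, 4] operand over dimension 0, producing f32[4].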
// Convenience wrapper around the above that reduces all the dimensions in the
// operand shape.
@@ -674,25 +665,23 @@ class XlaBuilder {
// Enqueues a windowed reduce instruction onto the computation.
XlaOp ReduceWindow(const XlaOp& operand, const XlaOp& init_value,
const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<int64> window_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- Padding padding);
+ absl::Span<const int64> window_dimensions,
+ absl::Span<const int64> window_strides, Padding padding);
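// Illustrative sketch (not part of this change): with `add` a scalar f32
// addition computation,
//   ReduceWindow(operand, ConstantR0<float>(0.0f), add,
//                /*window_dimensions=*/{2, 2}, /*window_strides=*/{2, 2},
//                Padding::kValid)
// computes a non-overlapping 2x2 sum pool, turning f32[4, 4] into f32[2, 2].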
// As ReduceWindow(), but the padding is given in the format
// returned by MakePadding().
XlaOp ReduceWindowWithGeneralPadding(
const XlaOp& operand, const XlaOp& init_value,
const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<int64> window_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding);
+ absl::Span<const int64> window_dimensions,
+ absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding);
// Returns the sum of the operand value within each subgroup of replicas. All
// replicas supply one input to the sum and all replicas receive the resulting
// sum for each subgroup.
- XlaOp CrossReplicaSum(
- const XlaOp& operand,
- tensorflow::gtl::ArraySlice<ReplicaGroup> replica_groups = {});
+ XlaOp CrossReplicaSum(const XlaOp& operand,
+ absl::Span<const ReplicaGroup> replica_groups = {});
// Enqueues an operation that does an AllReduce of the operand across cores. Here
// AllReduce means doing a reduction on the input operand across cores and then
@@ -714,7 +703,7 @@ class XlaBuilder {
// TODO(b/79737069): Rename this to AllReduce when it's ready to use.
XlaOp CrossReplicaSum(
const XlaOp& operand, const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<ReplicaGroup> replica_groups = {},
+ absl::Span<const ReplicaGroup> replica_groups = {},
const absl::optional<ChannelHandle>& channel_id = absl::nullopt);
// Enqueues an operation that does an Alltoall of the operand across cores.
@@ -731,8 +720,8 @@ class XlaBuilder {
// Enqueues an operation that scatters the `source` array to the selected
// indices of each window.
XlaOp SelectAndScatter(const XlaOp& operand, const XlaComputation& select,
- tensorflow::gtl::ArraySlice<int64> window_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_strides,
+ absl::Span<const int64> window_dimensions,
+ absl::Span<const int64> window_strides,
Padding padding, const XlaOp& source,
const XlaOp& init_value,
const XlaComputation& scatter);
@@ -741,18 +730,17 @@ class XlaBuilder {
// returned by MakePadding().
XlaOp SelectAndScatterWithGeneralPadding(
const XlaOp& operand, const XlaComputation& select,
- tensorflow::gtl::ArraySlice<int64> window_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
- const XlaOp& source, const XlaOp& init_value,
- const XlaComputation& scatter);
+ absl::Span<const int64> window_dimensions,
+ absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding, const XlaOp& source,
+ const XlaOp& init_value, const XlaComputation& scatter);
// Enqueues an abs instruction onto the computation.
XlaOp Abs(const XlaOp& operand);
// Enqueues an atan2 instruction onto the computation.
XlaOp Atan2(const XlaOp& y, const XlaOp& x,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
// Enqueues an exp instruction onto the computation.
XlaOp Exp(const XlaOp& operand);
@@ -799,7 +787,7 @@ class XlaBuilder {
// Enqueues a lhs^rhs computation onto the computation.
XlaOp Pow(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
// Enqueues an operator that tests if the operand's values are finite, i.e.,
// not Inf or NaN. Defined only for floating-point types. Returns an array of
@@ -829,14 +817,12 @@ class XlaBuilder {
XlaOp Neg(const XlaOp& operand);
// Enqueues a transpose instruction onto the computation.
- XlaOp Transpose(const XlaOp& operand,
- tensorflow::gtl::ArraySlice<int64> permutation);
+ XlaOp Transpose(const XlaOp& operand, absl::Span<const int64> permutation);
// Enqueues a reverse instruction onto the computation. The order of the
// elements in the given dimensions is reversed (i.e., the element at index i
// is moved to index dimension_size - 1 - i).
- XlaOp Rev(const XlaOp& operand,
- tensorflow::gtl::ArraySlice<int64> dimensions);
+ XlaOp Rev(const XlaOp& operand, absl::Span<const int64> dimensions);
// Enqueues a sort (as increasing order) instruction onto the computation.
// If only keys are provided:
@@ -861,10 +847,9 @@ class XlaBuilder {
XlaOp Clamp(const XlaOp& min, const XlaOp& operand, const XlaOp& max);
// Enqueues a map instruction onto the computation.
- XlaOp Map(tensorflow::gtl::ArraySlice<XlaOp> operands,
- const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<int64> dimensions,
- tensorflow::gtl::ArraySlice<XlaOp> static_operands = {});
+ XlaOp Map(absl::Span<const XlaOp> operands, const XlaComputation& computation,
+ absl::Span<const int64> dimensions,
+ absl::Span<const XlaOp> static_operands = {});
// Enqueues an N(mu, sigma) random number generation instruction onto the
// computation.
@@ -891,7 +876,7 @@ class XlaBuilder {
// Enqueues a Gather node onto the computation.
XlaOp Gather(const XlaOp& input, const XlaOp& start_indices,
const GatherDimensionNumbers& dimension_numbers,
- tensorflow::gtl::ArraySlice<int64> slice_sizes);
+ absl::Span<const int64> slice_sizes);
// Enqueues a Scatter node onto the computation.
XlaOp Scatter(const XlaOp& input, const XlaOp& scatter_indices,
@@ -919,7 +904,7 @@ class XlaBuilder {
// Enqueues an AfterAll operation with no operands producing a token-shaped
// value.
- XlaOp AfterAll(tensorflow::gtl::ArraySlice<XlaOp> tokens);
+ XlaOp AfterAll(absl::Span<const XlaOp> tokens);
// Enqueues a Recv node onto the computation. The data comes from a Send
// instruction that shares the same channel handle and its shape must
@@ -966,9 +951,8 @@ class XlaBuilder {
const XlaOp& grad_output, float epsilon,
int64 feature_index);
- StatusOr<XlaOp> AddInstruction(
- HloInstructionProto&& instr, HloOpcode opcode,
- tensorflow::gtl::ArraySlice<XlaOp> operands = {});
+ StatusOr<XlaOp> AddInstruction(HloInstructionProto&& instr, HloOpcode opcode,
+ absl::Span<const XlaOp> operands = {});
void AddCalledComputation(const XlaComputation& computation,
HloInstructionProto* instr);
@@ -982,19 +966,17 @@ class XlaBuilder {
// broadcast_dimensions specifies which dimensions to use for broadcasting
// when the operation is between tensors of different ranks.
XlaOp BinaryOp(HloOpcode binop, const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ absl::Span<const int64> broadcast_dimensions);
// Internal helper method that does the building for an arbitrary ternary op.
XlaOp TernaryOp(HloOpcode triop, const XlaOp& lhs, const XlaOp& rhs,
const XlaOp& ehs);
XlaOp RngOp(RandomDistribution distribution,
- tensorflow::gtl::ArraySlice<XlaOp> parameters,
- const Shape& shape);
+ absl::Span<const XlaOp> parameters, const Shape& shape);
- StatusOr<XlaOp> InDimBroadcast(
- const Shape& shape, const XlaOp& operand,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ StatusOr<XlaOp> InDimBroadcast(const Shape& shape, const XlaOp& operand,
+ absl::Span<const int64> broadcast_dimensions);
// Internal helper method that creates a sequence of instructions that
// performs an explicit broadcast of the operand to the target shape.
@@ -1010,7 +992,7 @@ class XlaBuilder {
// Returns shapes for the operands.
StatusOr<std::vector<Shape>> GetOperandShapes(
- tensorflow::gtl::ArraySlice<XlaOp> operands) const;
+ absl::Span<const XlaOp> operands) const;
// A visitor which checks whether an operation is a compile-time constant,
// meaning that it doesn't depend on any parameters, or on any stateful
@@ -1027,12 +1009,11 @@ class XlaBuilder {
// Helper function for creating a Window proto from user-supplied data.
// Returns error if the user-supplied data was invalid.
- StatusOr<Window> MakeWindow(
- tensorflow::gtl::ArraySlice<int64> window_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
- tensorflow::gtl::ArraySlice<int64> lhs_dilation,
- tensorflow::gtl::ArraySlice<int64> rhs_dilation) const;
+ StatusOr<Window> MakeWindow(absl::Span<const int64> window_dimensions,
+ absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding,
+ absl::Span<const int64> lhs_dilation,
+ absl::Span<const int64> rhs_dilation) const;
string name_; // Name to use for the built computation.
@@ -1076,7 +1057,7 @@ class XlaBuilder {
friend XlaOp ConstantR0(XlaBuilder* builder, NativeT value);
template <typename NativeT>
friend XlaOp ConstantR1(XlaBuilder* builder,
- tensorflow::gtl::ArraySlice<NativeT> values);
+ absl::Span<const NativeT> values);
friend XlaOp ConstantR1(XlaBuilder* builder,
const tensorflow::core::Bitmap& values);
template <typename NativeT>
@@ -1116,193 +1097,183 @@ class XlaBuilder {
friend XlaOp ConstantR1(XlaBuilder* builder, int64 length, NativeT value);
friend XlaOp Broadcast(const XlaOp& operand,
- tensorflow::gtl::ArraySlice<int64> broadcast_sizes);
+ absl::Span<const int64> broadcast_sizes);
friend XlaOp BroadcastInDim(
const XlaOp& operand, const Shape& shape,
- const tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ const absl::Span<const int64> broadcast_dimensions);
friend XlaOp Pad(const XlaOp& operand, const XlaOp& padding_value,
const PaddingConfig& padding_config);
- friend XlaOp Reshape(const XlaOp& operand,
- tensorflow::gtl::ArraySlice<int64> dimensions,
- tensorflow::gtl::ArraySlice<int64> new_sizes);
+ friend XlaOp Reshape(const XlaOp& operand, absl::Span<const int64> dimensions,
+ absl::Span<const int64> new_sizes);
- friend XlaOp Reshape(const XlaOp& operand,
- tensorflow::gtl::ArraySlice<int64> new_sizes);
+ friend XlaOp Reshape(const XlaOp& operand, absl::Span<const int64> new_sizes);
friend XlaOp Collapse(const XlaOp& operand,
- tensorflow::gtl::ArraySlice<int64> dimensions);
+ absl::Span<const int64> dimensions);
friend XlaOp Slice(const XlaOp& operand,
- tensorflow::gtl::ArraySlice<int64> start_indices,
- tensorflow::gtl::ArraySlice<int64> limit_indices,
- tensorflow::gtl::ArraySlice<int64> strides);
+ absl::Span<const int64> start_indices,
+ absl::Span<const int64> limit_indices,
+ absl::Span<const int64> strides);
friend XlaOp SliceInDim(const XlaOp& operand, int64 start_index,
int64 limit_index, int64 stride, int64 dimno);
friend XlaOp DynamicSlice(const XlaOp& operand, const XlaOp& start_indices,
- tensorflow::gtl::ArraySlice<int64> slice_sizes);
+ absl::Span<const int64> slice_sizes);
friend XlaOp DynamicUpdateSlice(const XlaOp& operand, const XlaOp& update,
const XlaOp& start_indices);
friend XlaOp ConcatInDim(XlaBuilder* builder,
- tensorflow::gtl::ArraySlice<XlaOp> operands,
- int64 dimension);
+ absl::Span<const XlaOp> operands, int64 dimension);
friend void Trace(const string& tag, const XlaOp& operand);
friend XlaOp Select(const XlaOp& pred, const XlaOp& on_true,
const XlaOp& on_false);
- friend XlaOp Tuple(XlaBuilder* builder,
- tensorflow::gtl::ArraySlice<XlaOp> elements);
+ friend XlaOp Tuple(XlaBuilder* builder, absl::Span<const XlaOp> elements);
friend XlaOp GetTupleElement(const XlaOp& tuple_data, int64 index);
friend XlaOp Eq(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ absl::Span<const int64> broadcast_dimensions);
friend XlaOp Ne(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ absl::Span<const int64> broadcast_dimensions);
friend XlaOp Ge(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ absl::Span<const int64> broadcast_dimensions);
friend XlaOp Gt(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ absl::Span<const int64> broadcast_dimensions);
friend XlaOp Lt(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ absl::Span<const int64> broadcast_dimensions);
friend XlaOp Le(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ absl::Span<const int64> broadcast_dimensions);
friend XlaOp Dot(const XlaOp& lhs, const XlaOp& rhs,
const PrecisionConfigProto* precision_config_proto);
friend XlaOp DotGeneral(const XlaOp& lhs, const XlaOp& rhs,
const DotDimensionNumbers& dimension_number,
const PrecisionConfigProto* precision_config_proto);
friend XlaOp Conv(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- Padding padding, int64 feature_group_count,
+ absl::Span<const int64> window_strides, Padding padding,
+ int64 feature_group_count,
const PrecisionConfigProto* precision_config_proto);
friend XlaOp ConvWithGeneralPadding(
const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
+ absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding,
int64 feature_group_count,
const PrecisionConfigProto* precision_config_proto);
friend XlaOp ConvWithGeneralDimensions(
const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> window_strides, Padding padding,
- const ConvolutionDimensionNumbers& dimension_numbers,
- int64 feature_group_count,
- const PrecisionConfigProto* precision_config_proto);
- friend XlaOp ConvGeneral(
- const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
+ absl::Span<const int64> window_strides, Padding padding,
const ConvolutionDimensionNumbers& dimension_numbers,
int64 feature_group_count,
const PrecisionConfigProto* precision_config_proto);
+ friend XlaOp ConvGeneral(const XlaOp& lhs, const XlaOp& rhs,
+ absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding,
+ const ConvolutionDimensionNumbers& dimension_numbers,
+ int64 feature_group_count,
+ const PrecisionConfigProto* precision_config_proto);
friend XlaOp ConvGeneralDilated(
const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
- tensorflow::gtl::ArraySlice<int64> lhs_dilation,
- tensorflow::gtl::ArraySlice<int64> rhs_dilation,
+ absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding,
+ absl::Span<const int64> lhs_dilation,
+ absl::Span<const int64> rhs_dilation,
const ConvolutionDimensionNumbers& dimension_numbers,
int64 feature_group_count,
const PrecisionConfigProto* precision_config_proto);
friend XlaOp Fft(const XlaOp& operand, FftType fft_type,
- tensorflow::gtl::ArraySlice<int64> fft_length);
+ absl::Span<const int64> fft_length);
friend XlaOp Infeed(XlaBuilder* builder, const Shape& shape,
const string& config);
friend void Outfeed(const XlaOp& operand, const Shape& shape_with_layout,
const string& outfeed_config);
friend XlaOp Call(XlaBuilder* builder, const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<XlaOp> operands);
+ absl::Span<const XlaOp> operands);
friend XlaOp CustomCall(XlaBuilder* builder, const string& call_target_name,
- tensorflow::gtl::ArraySlice<XlaOp> operands,
- const Shape& shape);
+ absl::Span<const XlaOp> operands, const Shape& shape);
friend XlaOp Complex(const XlaOp& real, const XlaOp& imag,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ absl::Span<const int64> broadcast_dimensions);
friend XlaOp Conj(const XlaOp& operand);
friend XlaOp Add(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ absl::Span<const int64> broadcast_dimensions);
friend XlaOp Sub(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ absl::Span<const int64> broadcast_dimensions);
friend XlaOp Mul(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ absl::Span<const int64> broadcast_dimensions);
friend XlaOp Div(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ absl::Span<const int64> broadcast_dimensions);
friend XlaOp Rem(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ absl::Span<const int64> broadcast_dimensions);
friend XlaOp Max(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ absl::Span<const int64> broadcast_dimensions);
friend XlaOp Min(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ absl::Span<const int64> broadcast_dimensions);
friend XlaOp And(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ absl::Span<const int64> broadcast_dimensions);
friend XlaOp Or(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ absl::Span<const int64> broadcast_dimensions);
friend XlaOp Xor(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ absl::Span<const int64> broadcast_dimensions);
friend XlaOp Not(const XlaOp& operand);
- friend XlaOp ShiftLeft(
- const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ friend XlaOp ShiftLeft(const XlaOp& lhs, const XlaOp& rhs,
+ absl::Span<const int64> broadcast_dimensions);
friend XlaOp ShiftRightArithmetic(
const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
- friend XlaOp ShiftRightLogical(
- const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ absl::Span<const int64> broadcast_dimensions);
+ friend XlaOp ShiftRightLogical(const XlaOp& lhs, const XlaOp& rhs,
+ absl::Span<const int64> broadcast_dimensions);
friend XlaOp Reduce(const XlaOp& operand, const XlaOp& init_value,
const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<int64> dimensions_to_reduce);
- friend XlaOp Reduce(XlaBuilder* builder,
- tensorflow::gtl::ArraySlice<XlaOp> operands,
- tensorflow::gtl::ArraySlice<XlaOp> init_values,
+ absl::Span<const int64> dimensions_to_reduce);
+ friend XlaOp Reduce(XlaBuilder* builder, absl::Span<const XlaOp> operands,
+ absl::Span<const XlaOp> init_values,
const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<int64> dimensions_to_reduce);
+ absl::Span<const int64> dimensions_to_reduce);
friend XlaOp ReduceAll(const XlaOp& operand, const XlaOp& init_value,
const XlaComputation& computation);
- friend XlaOp ReduceWindow(
- const XlaOp& operand, const XlaOp& init_value,
- const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<int64> window_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_strides, Padding padding);
+ friend XlaOp ReduceWindow(const XlaOp& operand, const XlaOp& init_value,
+ const XlaComputation& computation,
+ absl::Span<const int64> window_dimensions,
+ absl::Span<const int64> window_strides,
+ Padding padding);
friend XlaOp ReduceWindowWithGeneralPadding(
const XlaOp& operand, const XlaOp& init_value,
const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<int64> window_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding);
- friend XlaOp CrossReplicaSum(
- const XlaOp& operand,
- tensorflow::gtl::ArraySlice<ReplicaGroup> replica_groups);
- friend XlaOp CrossReplicaSum(
- const XlaOp& operand, const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<ReplicaGroup> replica_groups,
- const absl::optional<ChannelHandle>& channel_id);
+ absl::Span<const int64> window_dimensions,
+ absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding);
+ friend XlaOp CrossReplicaSum(const XlaOp& operand,
+ absl::Span<const ReplicaGroup> replica_groups);
+ friend XlaOp CrossReplicaSum(const XlaOp& operand,
+ const XlaComputation& computation,
+ absl::Span<const ReplicaGroup> replica_groups,
+ const absl::optional<ChannelHandle>& channel_id);
friend XlaOp AllToAll(const XlaOp& operand, int64 split_dimension,
int64 concat_dimension, int64 split_count,
const std::vector<ReplicaGroup>& replica_groups);
friend XlaOp CollectivePermute(
const XlaOp& operand,
const std::vector<std::pair<int64, int64>>& source_target_pairs);
- friend XlaOp SelectAndScatter(
- const XlaOp& operand, const XlaComputation& select,
- tensorflow::gtl::ArraySlice<int64> window_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_strides, Padding padding,
- const XlaOp& source, const XlaOp& init_value,
- const XlaComputation& scatter);
+ friend XlaOp SelectAndScatter(const XlaOp& operand,
+ const XlaComputation& select,
+ absl::Span<const int64> window_dimensions,
+ absl::Span<const int64> window_strides,
+ Padding padding, const XlaOp& source,
+ const XlaOp& init_value,
+ const XlaComputation& scatter);
friend XlaOp SelectAndScatterWithGeneralPadding(
const XlaOp& operand, const XlaComputation& select,
- tensorflow::gtl::ArraySlice<int64> window_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
- const XlaOp& source, const XlaOp& init_value,
- const XlaComputation& scatter);
+ absl::Span<const int64> window_dimensions,
+ absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding, const XlaOp& source,
+ const XlaOp& init_value, const XlaComputation& scatter);
friend XlaOp Abs(const XlaOp& operand);
friend XlaOp Atan2(const XlaOp& y, const XlaOp& x,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ absl::Span<const int64> broadcast_dimensions);
friend XlaOp Exp(const XlaOp& operand);
friend XlaOp Expm1(const XlaOp& operand);
friend XlaOp Floor(const XlaOp& operand);
@@ -1318,7 +1289,7 @@ class XlaBuilder {
friend XlaOp Real(const XlaOp& operand);
friend XlaOp Imag(const XlaOp& operand);
friend XlaOp Pow(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ absl::Span<const int64> broadcast_dimensions);
friend XlaOp IsFinite(const XlaOp& operand);
friend XlaOp Iota(XlaBuilder* builder, const Shape& shape,
int64 iota_dimension);
@@ -1329,16 +1300,14 @@ class XlaBuilder {
PrimitiveType new_element_type);
friend XlaOp Neg(const XlaOp& operand);
friend XlaOp Transpose(const XlaOp& operand,
- tensorflow::gtl::ArraySlice<int64> permutation);
- friend XlaOp Rev(const XlaOp& operand,
- tensorflow::gtl::ArraySlice<int64> dimensions);
+ absl::Span<const int64> permutation);
+ friend XlaOp Rev(const XlaOp& operand, absl::Span<const int64> dimensions);
friend XlaOp Sort(XlaOp keys, absl::optional<XlaOp> values, int64 dimension);
friend XlaOp Clamp(const XlaOp& min, const XlaOp& operand, const XlaOp& max);
- friend XlaOp Map(XlaBuilder* builder,
- tensorflow::gtl::ArraySlice<XlaOp> operands,
+ friend XlaOp Map(XlaBuilder* builder, absl::Span<const XlaOp> operands,
const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<int64> dimensions,
- tensorflow::gtl::ArraySlice<XlaOp> static_operands);
+ absl::Span<const int64> dimensions,
+ absl::Span<const XlaOp> static_operands);
friend XlaOp RngNormal(const XlaOp& mu, const XlaOp& sigma,
const Shape& shape);
friend XlaOp RngUniform(const XlaOp& a, const XlaOp& b, const Shape& shape);
@@ -1352,7 +1321,7 @@ class XlaBuilder {
const int mantissa_bits);
friend XlaOp Gather(const XlaOp& input, const XlaOp& start_indices,
const GatherDimensionNumbers& dimension_numbers,
- tensorflow::gtl::ArraySlice<int64> slice_sizes);
+ absl::Span<const int64> slice_sizes);
friend XlaOp Scatter(const XlaOp& input, const XlaOp& scatter_indices,
const XlaOp& updates,
const XlaComputation& update_computation,
@@ -1386,8 +1355,7 @@ class XlaBuilder {
const Shape& shape_with_layout,
const string& outfeed_config);
friend XlaOp CreateToken(XlaBuilder* builder);
- friend XlaOp AfterAll(XlaBuilder* builder,
- tensorflow::gtl::ArraySlice<XlaOp> tokens);
+ friend XlaOp AfterAll(XlaBuilder* builder, absl::Span<const XlaOp> tokens);
};
// RAII-style object: sets the current sharding assignment in builder on
@@ -1451,8 +1419,7 @@ XlaOp ConstantLiteral(XlaBuilder* builder, const LiteralSlice& literal);
template <typename NativeT>
XlaOp ConstantR0(XlaBuilder* builder, NativeT value);
template <typename NativeT>
-XlaOp ConstantR1(XlaBuilder* builder,
- tensorflow::gtl::ArraySlice<NativeT> values);
+XlaOp ConstantR1(XlaBuilder* builder, absl::Span<const NativeT> values);
XlaOp ConstantR1(XlaBuilder* builder, const tensorflow::core::Bitmap& values);
template <typename NativeT>
XlaOp ConstantR2(XlaBuilder* builder,
@@ -1501,8 +1468,7 @@ XlaOp ConstantR1(XlaBuilder* builder, int64 length, NativeT value);
// The new dimensions index into copies of the operand, i.e.
//
// output[i0, ..., iN, j0, ..., jM] = operand[j0, ..., jM]
-XlaOp Broadcast(const XlaOp& operand,
- tensorflow::gtl::ArraySlice<int64> broadcast_sizes);
+XlaOp Broadcast(const XlaOp& operand, absl::Span<const int64> broadcast_sizes);
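// Illustrative sketch (editorial addition, not part of this change): Broadcast
// prepends the given sizes, so each new index selects a full copy of the
// operand. Assuming a builder `b` created as xla::XlaBuilder b("example"):
//
//   xla::XlaOp v  = xla::ConstantR1<float>(&b, {10.f, 20.f});      // f32[2]
//   xla::XlaOp bc = xla::Broadcast(v, /*broadcast_sizes=*/{3});    // f32[3,2]
//   // bc[i][j] == v[j] for every i.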
// Performs in-dimension-style broadcast.
//
@@ -1521,9 +1487,8 @@ XlaOp Broadcast(const XlaOp& operand,
// will generate output
// [1 , 1]
// [2 , 2]
-XlaOp BroadcastInDim(
- const XlaOp& operand, const Shape& shape,
- const tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+XlaOp BroadcastInDim(const XlaOp& operand, const Shape& shape,
+ const absl::Span<const int64> broadcast_dimensions);
// Enqueues a pad operation onto the computation that pads the given value on
// the edges as well as between the elements of the input. padding_config
@@ -1536,15 +1501,13 @@ XlaOp Pad(const XlaOp& operand, const XlaOp& padding_value,
// given, followed by reshaping it into the shape with the given dimension
// sizes (also major to minor). Conceptually, this is a limited form of
// "shape casting".
-XlaOp Reshape(const XlaOp& operand,
- tensorflow::gtl::ArraySlice<int64> dimensions,
- tensorflow::gtl::ArraySlice<int64> new_sizes);
+XlaOp Reshape(const XlaOp& operand, absl::Span<const int64> dimensions,
+ absl::Span<const int64> new_sizes);
// Enqueues an operation onto the computation that collapses the operand, from
// first to last dimension (C order), then reshapes it to the given dimension
// sizes. Conceptually, this is a limited form of "shape casting".
-XlaOp Reshape(const XlaOp& operand,
- tensorflow::gtl::ArraySlice<int64> new_sizes);
+XlaOp Reshape(const XlaOp& operand, absl::Span<const int64> new_sizes);
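// Illustrative sketch (editorial addition, not part of this change), assuming a
// builder `b` and the two Reshape overloads declared above:
//
//   xla::XlaOp m  = xla::ConstantR2<float>(&b, {{1.f, 2.f, 3.f},
//                                               {4.f, 5.f, 6.f}});     // f32[2,3]
//   xla::XlaOp r  = xla::Reshape(m, /*new_sizes=*/{3, 2});             // {{1,2},{3,4},{5,6}}
//   xla::XlaOp rt = xla::Reshape(m, /*dimensions=*/{1, 0},
//                                /*new_sizes=*/{3, 2});                // transposes first: {{1,4},{2,5},{3,6}}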
// Wrapper for Reshape.
// Enqueues an operation to collapse the provided dimensions; e.g. an
@@ -1564,8 +1527,7 @@ XlaOp Reshape(const XlaOp& operand,
//
// This could potentially cause data to be moved -- it provides a more
// structured form of reshaping than an arbitrary Reshape operation.
-XlaOp Collapse(const XlaOp& operand,
- tensorflow::gtl::ArraySlice<int64> dimensions);
+XlaOp Collapse(const XlaOp& operand, absl::Span<const int64> dimensions);
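// Illustrative sketch (editorial addition, not part of this change): for a
// hypothetical f32[2,3,4] value `op`, collapsing the two trailing dimensions
// gives
//
//   xla::XlaOp c = xla::Collapse(op, /*dimensions=*/{1, 2});   // f32[2,12]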
// Enqueues a slice operation onto the computation that slices the operand
// from the start indices to the limit indices; e.g.
@@ -1578,10 +1540,9 @@ XlaOp Collapse(const XlaOp& operand,
// Note that "limit" means up-to-but-not-including; i.e. [start, limit) in 1D
// range notation.
// The strides parameter determines the stride over the slice
-XlaOp Slice(const XlaOp& operand,
- tensorflow::gtl::ArraySlice<int64> start_indices,
- tensorflow::gtl::ArraySlice<int64> limit_indices,
- tensorflow::gtl::ArraySlice<int64> strides);
+XlaOp Slice(const XlaOp& operand, absl::Span<const int64> start_indices,
+ absl::Span<const int64> limit_indices,
+ absl::Span<const int64> strides);
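// Illustrative sketch (editorial addition, not part of this change), assuming a
// builder `b`:
//
//   xla::XlaOp v = xla::ConstantR1<float>(&b, {0.f, 1.f, 2.f, 3.f, 4.f, 5.f});
//   xla::XlaOp s = xla::Slice(v, /*start_indices=*/{1}, /*limit_indices=*/{5},
//                             /*strides=*/{2});                 // f32[2] = {1, 3}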
// Enqueues a slice operation in a given dimension, taking all other
// dimensions as they are; e.g. if dimno is 1 from start_index 2 to
@@ -1602,7 +1563,7 @@ XlaOp SliceInDim(const XlaOp& operand, int64 start_index, int64 limit_index,
// Slice index calculations are computed modulo input dimension sizes to
// prevent dynamic start indices from generating out-of-bound array accesses.
XlaOp DynamicSlice(const XlaOp& operand, const XlaOp& start_indices,
- tensorflow::gtl::ArraySlice<int64> slice_sizes);
+ absl::Span<const int64> slice_sizes);
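// Illustrative sketch (editorial addition, not part of this change): for a
// hypothetical f32[6] value v = {0, 1, 2, 3, 4, 5}, a start index supplied at
// run time selects a window of the statically known slice_sizes:
//
//   xla::XlaOp starts = xla::ConstantR1<int32_t>(&b, {2});
//   xla::XlaOp d = xla::DynamicSlice(v, starts, /*slice_sizes=*/{3});  // f32[3] = {2, 3, 4}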
// Enqueues a dynamic update slice operation onto the computation, which
// updates a slice of 'operand' with 'update' at dynamic 'start_indices'.
@@ -1625,8 +1586,8 @@ XlaOp DynamicUpdateSlice(const XlaOp& operand, const XlaOp& update,
// Enqueues a concatenate instruction onto the computation. 'operands' must
// have >= 1 entry.
-XlaOp ConcatInDim(XlaBuilder* builder,
- tensorflow::gtl::ArraySlice<XlaOp> operands, int64 dimension);
+XlaOp ConcatInDim(XlaBuilder* builder, absl::Span<const XlaOp> operands,
+ int64 dimension);
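// Illustrative sketch (editorial addition, not part of this change), assuming a
// builder `b`:
//
//   xla::XlaOp a   = xla::ConstantR1<float>(&b, {1.f, 2.f});
//   xla::XlaOp c   = xla::ConstantR1<float>(&b, {3.f, 4.f, 5.f});
//   xla::XlaOp cat = xla::ConcatInDim(&b, {a, c}, /*dimension=*/0);  // f32[5]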
// Enqueues a tracing operation onto the computation; the computation will emit
// a logging message with the operand.
@@ -1637,34 +1598,34 @@ void Trace(const string& tag, const XlaOp& operand);
XlaOp Select(const XlaOp& pred, const XlaOp& on_true, const XlaOp& on_false);
// Enqueues a tuple-creation instruction onto the computation.
-XlaOp Tuple(XlaBuilder* builder, tensorflow::gtl::ArraySlice<XlaOp> elements);
+XlaOp Tuple(XlaBuilder* builder, absl::Span<const XlaOp> elements);
// Enqueues a tuple-element-get instruction onto the computation.
XlaOp GetTupleElement(const XlaOp& tuple_data, int64 index);
// Enqueues an equal-to comparison instruction onto the computation.
XlaOp Eq(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
// Enqueues a not-equal comparison instruction onto the computation.
XlaOp Ne(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
// Enqueues a greater-or-equal comparison instruction onto the computation.
XlaOp Ge(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
// Enqueues a greater-than comparison instruction onto the computation.
XlaOp Gt(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
// Enqueues a less-than comparison instruction onto the computation.
XlaOp Lt(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
// Enqueues a less-or-equal comparison instruction onto the computation.
XlaOp Le(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
// Enqueues a dot instruction onto the computation.
XlaOp Dot(const XlaOp& lhs, const XlaOp& rhs,
@@ -1678,33 +1639,31 @@ XlaOp DotGeneral(const XlaOp& lhs, const XlaOp& rhs,
// Enqueues a convolution instruction onto the computation, which uses the
// default convolution dimension numbers.
XlaOp Conv(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> window_strides, Padding padding,
+ absl::Span<const int64> window_strides, Padding padding,
int64 feature_group_count = 1,
const PrecisionConfigProto* precision_config_proto = nullptr);
// Enqueues a convolution instruction onto the computation, with the caller
// provided padding configuration in the format returned by MakePadding().
XlaOp ConvWithGeneralPadding(
- const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
+ const XlaOp& lhs, const XlaOp& rhs, absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding,
int64 feature_group_count = 1,
const PrecisionConfigProto* precision_config_proto = nullptr);
// Enqueues a convolution instruction onto the computation, with the caller
// provided dimension numbers configuration.
XlaOp ConvWithGeneralDimensions(
- const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> window_strides, Padding padding,
- const ConvolutionDimensionNumbers& dimension_numbers,
+ const XlaOp& lhs, const XlaOp& rhs, absl::Span<const int64> window_strides,
+ Padding padding, const ConvolutionDimensionNumbers& dimension_numbers,
int64 feature_group_count = 1,
const PrecisionConfigProto* precision_config_proto = nullptr);
// Enqueues a convolution instruction onto the computation, with the caller
// provided padding configuration as well as the dimension numbers.
XlaOp ConvGeneral(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
+ absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding,
const ConvolutionDimensionNumbers& dimension_numbers,
int64 feature_group_count = 1,
const PrecisionConfigProto* precision_config_proto = nullptr);
@@ -1712,11 +1671,9 @@ XlaOp ConvGeneral(const XlaOp& lhs, const XlaOp& rhs,
// Enqueues a convolution instruction onto the computation, with the caller
// provided padding configuration, dilation factors and dimension numbers.
XlaOp ConvGeneralDilated(
- const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
- tensorflow::gtl::ArraySlice<int64> lhs_dilation,
- tensorflow::gtl::ArraySlice<int64> rhs_dilation,
+ const XlaOp& lhs, const XlaOp& rhs, absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding,
+ absl::Span<const int64> lhs_dilation, absl::Span<const int64> rhs_dilation,
const ConvolutionDimensionNumbers& dimension_numbers,
int64 feature_group_count = 1,
const PrecisionConfigProto* precision_config_proto = nullptr);
@@ -1724,7 +1681,7 @@ XlaOp ConvGeneralDilated(
// Enqueues an FFT instruction onto the computation, of the given type and
// with the given FFT length.
XlaOp Fft(const XlaOp& operand, FftType fft_type,
- tensorflow::gtl::ArraySlice<int64> fft_length);
+ absl::Span<const int64> fft_length);
// Enqueues an infeed instruction onto the computation, which writes data of
// the given shape to the infeed buffer of the device.
@@ -1756,15 +1713,14 @@ XlaOp OutfeedWithToken(const XlaOp& operand, const XlaOp& token,
// Enqueues a call instruction onto the computation.
XlaOp Call(XlaBuilder* builder, const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<XlaOp> operands);
+ absl::Span<const XlaOp> operands);
// Enqueues a custom call instruction onto the computation.
// During code generation, a call instruction is emitted which targets a
// symbol with the name |call_target_name|. The |operands| are passed to the
// call instruction. |shape| is the resultant shape.
XlaOp CustomCall(XlaBuilder* builder, const string& call_target_name,
- tensorflow::gtl::ArraySlice<XlaOp> operands,
- const Shape& shape);
+ absl::Span<const XlaOp> operands, const Shape& shape);
// The following methods enqueue element-wise binary arithmetic operations
// onto the computation. The shapes of the operands have to match unless one
@@ -1773,72 +1729,70 @@ XlaOp CustomCall(XlaBuilder* builder, const string& call_target_name,
// Enqueues a complex compose instruction onto the computation.
XlaOp Complex(const XlaOp& real, const XlaOp& imag,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
// Enqueues a complex conjugate instruction onto the computation.
XlaOp Conj(const XlaOp& operand);
// Enqueues an add instruction onto the computation.
XlaOp Add(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
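// Illustrative sketch (editorial addition, not part of this change): the
// broadcast_dimensions argument maps each dimension of the lower-rank operand
// onto a dimension of the higher-rank operand. Assuming a builder `b`:
//
//   xla::XlaOp m   = xla::ConstantR2<float>(&b, {{1.f, 2.f, 3.f},
//                                                {4.f, 5.f, 6.f}});   // f32[2,3]
//   xla::XlaOp row = xla::ConstantR1<float>(&b, {10.f, 20.f, 30.f});  // f32[3]
//   xla::XlaOp sum = xla::Add(m, row, /*broadcast_dimensions=*/{1});
//   // row's dimension 0 lines up with m's dimension 1, so {10, 20, 30} is
//   // added to each row of m.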
// Enqueues a subtract instruction onto the computation.
XlaOp Sub(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
// Enqueues a multiply instruction onto the computation.
XlaOp Mul(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
// Enqueues a divide instruction onto the computation.
XlaOp Div(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
// Enqueues a remainder instruction onto the computation.
XlaOp Rem(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
// Enqueues a max instruction onto the computation.
XlaOp Max(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
// Enqueues a min instruction onto the computation.
XlaOp Min(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
// Element-wise logical operators
XlaOp And(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
XlaOp Or(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
XlaOp Xor(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
XlaOp Not(const XlaOp& operand);
XlaOp ShiftLeft(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
-XlaOp ShiftRightArithmetic(
- const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
-XlaOp ShiftRightLogical(
- const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
+XlaOp ShiftRightArithmetic(const XlaOp& lhs, const XlaOp& rhs,
+ absl::Span<const int64> broadcast_dimensions = {});
+XlaOp ShiftRightLogical(const XlaOp& lhs, const XlaOp& rhs,
+ absl::Span<const int64> broadcast_dimensions = {});
// Reduces an array among the provided dimensions, given "computation" as a
// reduction operator.
XlaOp Reduce(const XlaOp& operand, const XlaOp& init_value,
const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<int64> dimensions_to_reduce);
+ absl::Span<const int64> dimensions_to_reduce);
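// Illustrative sketch (editorial addition, not part of this change): summing an
// f32[2,3] value `m` along its second dimension. The scalar add computation is
// assumed to come from the client helper library
// (tensorflow/compiler/xla/client/lib/arithmetic.h).
//
//   xla::XlaComputation add = xla::CreateScalarAddComputation(xla::F32, &b);
//   xla::XlaOp row_sums = xla::Reduce(m, xla::ConstantR0<float>(&b, 0.f), add,
//                                     /*dimensions_to_reduce=*/{1});   // f32[2]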
// Reduces several arrays simultaneously among the provided dimensions, given
// "computation" as a reduction operator.
-XlaOp Reduce(XlaBuilder* builder, tensorflow::gtl::ArraySlice<XlaOp> operands,
- tensorflow::gtl::ArraySlice<XlaOp> init_values,
+XlaOp Reduce(XlaBuilder* builder, absl::Span<const XlaOp> operands,
+ absl::Span<const XlaOp> init_values,
const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<int64> dimensions_to_reduce);
+ absl::Span<const int64> dimensions_to_reduce);
// Convenience wrapper around the above that reduces all the dimensions in the
// operand shape.
@@ -1848,25 +1802,23 @@ XlaOp ReduceAll(const XlaOp& operand, const XlaOp& init_value,
// Enqueues a windowed reduce instruction onto the computation.
XlaOp ReduceWindow(const XlaOp& operand, const XlaOp& init_value,
const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<int64> window_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- Padding padding);
+ absl::Span<const int64> window_dimensions,
+ absl::Span<const int64> window_strides, Padding padding);
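// Illustrative sketch (editorial addition, not part of this change): a 1x2
// max-pool over a hypothetical f32[2,4] value `m`, with the scalar max
// computation assumed to come from the client helper library
// (tensorflow/compiler/xla/client/lib/arithmetic.h).
//
//   xla::XlaComputation max = xla::CreateScalarMaxComputation(xla::F32, &b);
//   xla::XlaOp init = xla::ConstantR0<float>(&b, -std::numeric_limits<float>::infinity());
//   xla::XlaOp pooled = xla::ReduceWindow(m, init, max,
//                                         /*window_dimensions=*/{1, 2},
//                                         /*window_strides=*/{1, 2},
//                                         xla::Padding::kValid);        // f32[2,2]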
// As ReduceWindow(), but the padding is given in the format
// returned by MakePadding().
XlaOp ReduceWindowWithGeneralPadding(
const XlaOp& operand, const XlaOp& init_value,
const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<int64> window_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding);
+ absl::Span<const int64> window_dimensions,
+ absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding);
// Returns the sum of the operand value within each subgroup of replicas. All
// replicas supply one input to the sum and all replicas receive the resulting
// sum for each subgroup.
-XlaOp CrossReplicaSum(
- const XlaOp& operand,
- tensorflow::gtl::ArraySlice<ReplicaGroup> replica_groups = {});
+XlaOp CrossReplicaSum(const XlaOp& operand,
+ absl::Span<const ReplicaGroup> replica_groups = {});
// Enqueues an operation that does an AllReduce of the operand across cores. Here
// AllReduce means doing a reduction on the input operand across cores and then
@@ -1887,7 +1839,7 @@ XlaOp CrossReplicaSum(
// TODO(b/79737069): Rename this to AllReduce when it's ready to use.
XlaOp CrossReplicaSum(
const XlaOp& operand, const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<ReplicaGroup> replica_groups = {},
+ absl::Span<const ReplicaGroup> replica_groups = {},
const absl::optional<ChannelHandle>& channel_id = absl::nullopt);
// Enqueues an operation that does an Alltoall of the operand across cores.
@@ -1910,27 +1862,26 @@ XlaOp CollectivePermute(
// Enqueues an operation that scatters the `source` array to the selected
// indices of each window.
XlaOp SelectAndScatter(const XlaOp& operand, const XlaComputation& select,
- tensorflow::gtl::ArraySlice<int64> window_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- Padding padding, const XlaOp& source,
- const XlaOp& init_value, const XlaComputation& scatter);
+ absl::Span<const int64> window_dimensions,
+ absl::Span<const int64> window_strides, Padding padding,
+ const XlaOp& source, const XlaOp& init_value,
+ const XlaComputation& scatter);
// As SelectAndScatter(), but the padding is given in the format
// returned by MakePadding().
XlaOp SelectAndScatterWithGeneralPadding(
const XlaOp& operand, const XlaComputation& select,
- tensorflow::gtl::ArraySlice<int64> window_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
- const XlaOp& source, const XlaOp& init_value,
- const XlaComputation& scatter);
+ absl::Span<const int64> window_dimensions,
+ absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding, const XlaOp& source,
+ const XlaOp& init_value, const XlaComputation& scatter);
// Enqueues an abs instruction onto the computation.
XlaOp Abs(const XlaOp& operand);
// Enqueues an atan2 instruction onto the computation.
XlaOp Atan2(const XlaOp& y, const XlaOp& x,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
// Enqueues an exp instruction onto the computation.
XlaOp Exp(const XlaOp& operand);
@@ -1977,7 +1928,7 @@ XlaOp Imag(const XlaOp& operand);
// Enqueues a lhs^rhs computation onto the computation.
XlaOp Pow(const XlaOp& lhs, const XlaOp& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions = {});
+ absl::Span<const int64> broadcast_dimensions = {});
// Enqueues an operator that tests if the operand's values are finite, i.e.,
// not Inf or NaN. Defined only for floating-point types. Returns an array of
@@ -2005,13 +1956,12 @@ XlaOp BitcastConvertType(const XlaOp& operand, PrimitiveType new_element_type);
XlaOp Neg(const XlaOp& operand);
// Enqueues a transpose instruction onto the computation.
-XlaOp Transpose(const XlaOp& operand,
- tensorflow::gtl::ArraySlice<int64> permutation);
+XlaOp Transpose(const XlaOp& operand, absl::Span<const int64> permutation);
// Enqueues a reverse instruction onto the computation. The order of the
// elements in the given dimensions is reversed (i.e., the element at index i
// is moved to index dimension_size - 1 - i).
-XlaOp Rev(const XlaOp& operand, tensorflow::gtl::ArraySlice<int64> dimensions);
+XlaOp Rev(const XlaOp& operand, absl::Span<const int64> dimensions);
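// Illustrative sketch (editorial addition, not part of this change), assuming a
// builder `b`:
//
//   xla::XlaOp m = xla::ConstantR2<float>(&b, {{1.f, 2.f, 3.f},
//                                              {4.f, 5.f, 6.f}});   // f32[2,3]
//   xla::XlaOp t = xla::Transpose(m, /*permutation=*/{1, 0});       // f32[3,2]
//   xla::XlaOp r = xla::Rev(m, /*dimensions=*/{1});                 // {{3,2,1},{6,5,4}}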
// Enqueues a sort (as increasing order) instruction onto the computation.
// If only keys are provided:
@@ -2036,10 +1986,9 @@ XlaOp Sort(XlaOp keys, absl::optional<XlaOp> values = absl::nullopt,
XlaOp Clamp(const XlaOp& min, const XlaOp& operand, const XlaOp& max);
// Enqueues a map instruction onto the computation.
-XlaOp Map(XlaBuilder* builder, tensorflow::gtl::ArraySlice<XlaOp> operands,
- const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<int64> dimensions,
- tensorflow::gtl::ArraySlice<XlaOp> static_operands = {});
+XlaOp Map(XlaBuilder* builder, absl::Span<const XlaOp> operands,
+ const XlaComputation& computation, absl::Span<const int64> dimensions,
+ absl::Span<const XlaOp> static_operands = {});
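// Illustrative sketch (editorial addition, not part of this change): applying a
// scalar "plus one" computation elementwise to an f32[2,3] value `m`, using
// Parameter and ShapeUtil::MakeShape to build the scalar subcomputation.
//
//   xla::XlaBuilder sub("plus_one");
//   xla::XlaOp x = xla::Parameter(&sub, 0, xla::ShapeUtil::MakeShape(xla::F32, {}), "x");
//   xla::Add(x, xla::ConstantR0<float>(&sub, 1.f));
//   xla::XlaComputation plus_one = sub.Build().ValueOrDie();
//   xla::XlaOp mapped = xla::Map(&b, {m}, plus_one, /*dimensions=*/{0, 1});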
// Enqueues an N(mu, sigma) random number generation instruction onto the
// computation.
@@ -2066,7 +2015,7 @@ XlaOp ReducePrecision(const XlaOp& operand, const int exponent_bits,
// Enqueues a Gather node onto the computation.
XlaOp Gather(const XlaOp& input, const XlaOp& start_indices,
const GatherDimensionNumbers& dimension_numbers,
- tensorflow::gtl::ArraySlice<int64> slice_sizes);
+ absl::Span<const int64> slice_sizes);
// Enqueues a Scatter node onto the computation.
XlaOp Scatter(const XlaOp& input, const XlaOp& scatter_indices,
@@ -2124,7 +2073,7 @@ XlaOp CreateToken(XlaBuilder* builder);
// Enqueues an AfterAll instruction which produces a token-shaped value and
// takes a variadic number of token-shaped operands. The number of operands must
// be greater than zero. Used for joining tokens.
-XlaOp AfterAll(XlaBuilder* builder, tensorflow::gtl::ArraySlice<XlaOp> tokens);
+XlaOp AfterAll(XlaBuilder* builder, absl::Span<const XlaOp> tokens);
// Normalizes operand across spatial and batch dimensions for each feature.
//
@@ -2172,7 +2121,7 @@ XlaOp XlaBuilder::ConstantR0(NativeT value) {
}
template <typename NativeT>
-XlaOp XlaBuilder::ConstantR1(tensorflow::gtl::ArraySlice<NativeT> values) {
+XlaOp XlaBuilder::ConstantR1(absl::Span<const NativeT> values) {
return ConstantLiteral(*LiteralUtil::CreateR1<NativeT>(values));
}
@@ -2249,8 +2198,7 @@ XlaOp ConstantR0(XlaBuilder* builder, NativeT value) {
}
template <typename NativeT>
-XlaOp ConstantR1(XlaBuilder* builder,
- tensorflow::gtl::ArraySlice<NativeT> values) {
+XlaOp ConstantR1(XlaBuilder* builder, absl::Span<const NativeT> values) {
return ConstantLiteral(builder, *LiteralUtil::CreateR1<NativeT>(values));
}
diff --git a/tensorflow/compiler/xla/index_util.cc b/tensorflow/compiler/xla/index_util.cc
index 693dcb3a3e..3fadabcf52 100644
--- a/tensorflow/compiler/xla/index_util.cc
+++ b/tensorflow/compiler/xla/index_util.cc
@@ -27,7 +27,7 @@ limitations under the License.
namespace xla {
/* static */ int64 IndexUtil::MultidimensionalIndexToLinearIndex(
- const Shape& shape, tensorflow::gtl::ArraySlice<int64> multi_index) {
+ const Shape& shape, absl::Span<const int64> multi_index) {
DCHECK_EQ(shape.dimensions_size(), multi_index.size());
// Padding and nested layouts not supported yet.
DCHECK_EQ(0, shape.layout().padded_dimensions_size());
@@ -118,8 +118,8 @@ namespace xla {
return multi_index;
}
-/* static */ bool IndexUtil::BumpIndices(
- const Shape& shape, tensorflow::gtl::MutableArraySlice<int64> indices) {
+/* static */ bool IndexUtil::BumpIndices(const Shape& shape,
+ absl::Span<int64> indices) {
for (int64 dimno = indices.size() - 1; dimno >= 0; --dimno) {
int64 limit = shape.dimensions(dimno);
if (indices[dimno] + 1 < limit) {
@@ -149,8 +149,8 @@ namespace xla {
return stride;
}
-/* static */ bool IndexUtil::IndexInBounds(
- const Shape& shape, tensorflow::gtl::ArraySlice<int64> index) {
+/* static */ bool IndexUtil::IndexInBounds(const Shape& shape,
+ absl::Span<const int64> index) {
int64 rank = ShapeUtil::Rank(shape);
if (rank != index.size()) {
return false;
@@ -163,9 +163,8 @@ namespace xla {
return true;
}
-/* static */ int IndexUtil::CompareIndices(
- tensorflow::gtl::ArraySlice<int64> lhs,
- tensorflow::gtl::ArraySlice<int64> rhs) {
+/* static */ int IndexUtil::CompareIndices(absl::Span<const int64> lhs,
+ absl::Span<const int64> rhs) {
int64 rank = lhs.size();
CHECK_EQ(rhs.size(), rank);
for (int64 dim = 0; dim < rank; ++dim) {
diff --git a/tensorflow/compiler/xla/index_util.h b/tensorflow/compiler/xla/index_util.h
index 142006f262..d86437f83c 100644
--- a/tensorflow/compiler/xla/index_util.h
+++ b/tensorflow/compiler/xla/index_util.h
@@ -35,7 +35,7 @@ class IndexUtil {
// on the shape and its layout. The first index in the multi_index is
// dimension 0.
static int64 MultidimensionalIndexToLinearIndex(
- const Shape& shape, tensorflow::gtl::ArraySlice<int64> multi_index);
+ const Shape& shape, absl::Span<const int64> multi_index);
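// Illustrative sketch (editorial addition, not part of this change): for a
// dense f32[3,4] shape with the default {1, 0} (row-major) layout, the index
// {2, 1} maps to linear position 2 * 4 + 1 = 9.
//
//   Shape s = ShapeUtil::MakeShapeWithLayout(F32, {3, 4}, {1, 0});
//   int64 pos = IndexUtil::MultidimensionalIndexToLinearIndex(s, {2, 1});  // 9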
// Converts a linear index into multidimensional index (eg {x, y, z}) based on
// the shape and its layout. The first index in the returned multidimensional
@@ -58,8 +58,7 @@ class IndexUtil {
//
// Returns true iff the indices were successfully bumped; false if we've hit
// the limit where it can no longer be bumped in-bounds.
- static bool BumpIndices(const Shape& shape,
- tensorflow::gtl::MutableArraySlice<int64> indices);
+ static bool BumpIndices(const Shape& shape, absl::Span<int64> indices);
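// Illustrative sketch (editorial addition, not part of this change): a common
// way to visit every index of a non-empty shape is to start at the zero index
// and bump until it wraps.
//
//   std::vector<int64> idx(ShapeUtil::Rank(shape), 0);
//   do {
//     // ... use idx ...
//   } while (IndexUtil::BumpIndices(shape, absl::MakeSpan(idx)));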
// Calculates the stride size (in number of elements, not byte size) of a
// given logical shape dimension (from 0 to rank-1). If available, padded
@@ -71,15 +70,14 @@ class IndexUtil {
// Returns true iff the given multi-index is contained in the bounds for the
// shape.
- static bool IndexInBounds(const Shape& shape,
- tensorflow::gtl::ArraySlice<int64> index);
+ static bool IndexInBounds(const Shape& shape, absl::Span<const int64> index);
// Compares the given indices in lexicographic order. lhs[0] and rhs[0] are
// compared first, and lhs[rank-1] and rhs[rank-1] last. If lhs is larger,
// then -1 is returned. If rhs is larger, then 1 is returned. Otherwise, 0 is
// returned.
- static int CompareIndices(tensorflow::gtl::ArraySlice<int64> lhs,
- tensorflow::gtl::ArraySlice<int64> rhs);
+ static int CompareIndices(absl::Span<const int64> lhs,
+ absl::Span<const int64> rhs);
private:
TF_DISALLOW_COPY_AND_ASSIGN(IndexUtil);
diff --git a/tensorflow/compiler/xla/layout_util.cc b/tensorflow/compiler/xla/layout_util.cc
index cce1838ef3..d310335618 100644
--- a/tensorflow/compiler/xla/layout_util.cc
+++ b/tensorflow/compiler/xla/layout_util.cc
@@ -56,7 +56,7 @@ void SetDefaultLayoutToContainer(
} // namespace
/* static */ Layout LayoutUtil::MakeLayout(
- tensorflow::gtl::ArraySlice<int64> minor_to_major) {
+ absl::Span<const int64> minor_to_major) {
Layout layout;
layout.set_format(DENSE);
for (int64 dimension_number : minor_to_major) {
@@ -66,7 +66,7 @@ void SetDefaultLayoutToContainer(
}
/* static */ Layout LayoutUtil::MakeLayoutFromMajorToMinor(
- tensorflow::gtl::ArraySlice<int64> major_to_minor) {
+ absl::Span<const int64> major_to_minor) {
Layout layout;
layout.set_format(DENSE);
for (int i = major_to_minor.size() - 1; i >= 0; i--) {
@@ -307,7 +307,7 @@ Layout CreateDefaultLayoutForRank(int64 rank) {
return false;
}
-/* static */ tensorflow::gtl::ArraySlice<int64> LayoutUtil::PaddedDimensions(
+/* static */ absl::Span<const int64> LayoutUtil::PaddedDimensions(
const Shape& shape) {
CHECK(IsDenseArray(shape));
return AsInt64Slice(shape.layout().padded_dimensions());
@@ -363,13 +363,13 @@ Layout CreateDefaultLayoutForRank(int64 rank) {
return protobuf_util::ProtobufEquals(lhs, rhs);
}
-/* static */ tensorflow::gtl::ArraySlice<int64> LayoutUtil::MinorToMajor(
+/* static */ absl::Span<const int64> LayoutUtil::MinorToMajor(
const Shape& shape) {
CHECK(IsDenseArray(shape));
return AsInt64Slice(shape.layout().minor_to_major());
}
-/* static */ tensorflow::gtl::ArraySlice<int64> LayoutUtil::MinorToMajor(
+/* static */ absl::Span<const int64> LayoutUtil::MinorToMajor(
const Layout& layout) {
CHECK(layout.format() == DENSE);
return AsInt64Slice(layout.minor_to_major());
@@ -472,7 +472,7 @@ Status LayoutUtil::CopyLayoutBetweenShapes(const Shape& src, Shape* dst) {
}
/* static */ bool LayoutUtil::AreDimensionsConsecutive(
- const Layout& layout, tensorflow::gtl::ArraySlice<int64> dims) {
+ const Layout& layout, absl::Span<const int64> dims) {
CHECK(IsDense(layout));
std::vector<int64> positions_in_layout;
for (int64 dim : dims) {
diff --git a/tensorflow/compiler/xla/layout_util.h b/tensorflow/compiler/xla/layout_util.h
index 739bbe7367..2ffef8688d 100644
--- a/tensorflow/compiler/xla/layout_util.h
+++ b/tensorflow/compiler/xla/layout_util.h
@@ -34,11 +34,11 @@ class LayoutUtil {
public:
// Creates a layout with the given minor-to-major dimension order. (This is a
// convenience function for protobuf construction.)
- static Layout MakeLayout(tensorflow::gtl::ArraySlice<int64> minor_to_major);
+ static Layout MakeLayout(absl::Span<const int64> minor_to_major);
// Similar to MakeLayout, but take indices in reverse order.
static Layout MakeLayoutFromMajorToMinor(
- tensorflow::gtl::ArraySlice<int64> major_to_minor);
+ absl::Span<const int64> major_to_minor);
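// Illustrative sketch (editorial addition, not part of this change): for a
// rank-2 array, minor_to_major = {1, 0} means dimension 1 varies fastest
// (row-major); the major-to-minor variant takes the same list read from the
// other end.
//
//   Layout row_major = LayoutUtil::MakeLayout({1, 0});
//   Layout col_major = LayoutUtil::MakeLayoutFromMajorToMinor({1, 0});
//   // col_major is equivalent to LayoutUtil::MakeLayout({0, 1}).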
// Creates a sparse layout with the given maximum number of elements. (This is
// a convenience function for protobuf construction.)
@@ -104,8 +104,7 @@ class LayoutUtil {
// Returns the padded_dimensions array for the given Shape. Requires that the
// shape is an array and has a dense layout.
- static tensorflow::gtl::ArraySlice<int64> PaddedDimensions(
- const Shape& shape);
+ static absl::Span<const int64> PaddedDimensions(const Shape& shape);
// Returns the given index of the padded_dimensions array for the given Shape.
// Requires that the shape is an array and has a dense layout.
@@ -138,8 +137,8 @@ class LayoutUtil {
// Returns the minor_to_major array for the given Shape. Requires that the
// shape is an array and has a dense layout.
- static tensorflow::gtl::ArraySlice<int64> MinorToMajor(const Shape& shape);
- static tensorflow::gtl::ArraySlice<int64> MinorToMajor(const Layout& layout);
+ static absl::Span<const int64> MinorToMajor(const Shape& shape);
+ static absl::Span<const int64> MinorToMajor(const Layout& layout);
// Major(0) is the most major logical dimension number, Major(1) is the
// second-most-major logical dimension number and so on.
@@ -196,7 +195,7 @@ class LayoutUtil {
// Returns whether the given dimensions are consecutive in the given layout,
// not necessarily in the order given.
static bool AreDimensionsConsecutive(const Layout& layout,
- tensorflow::gtl::ArraySlice<int64> dims);
+ absl::Span<const int64> dims);
// Compute a hash for `layout`.
static size_t Hash(const Layout& layout);
diff --git a/tensorflow/compiler/xla/layout_util_test.cc b/tensorflow/compiler/xla/layout_util_test.cc
index e4c825450d..f25dae6ff4 100644
--- a/tensorflow/compiler/xla/layout_util_test.cc
+++ b/tensorflow/compiler/xla/layout_util_test.cc
@@ -27,15 +27,15 @@ namespace {
class LayoutUtilTest : public ::testing::Test {
protected:
Shape MakeShapeWithLayout(PrimitiveType element_type,
- tensorflow::gtl::ArraySlice<int64> dimensions,
- tensorflow::gtl::ArraySlice<int64> minor_to_major) {
+ absl::Span<const int64> dimensions,
+ absl::Span<const int64> minor_to_major) {
Shape shape = ShapeUtil::MakeShape(element_type, dimensions);
*shape.mutable_layout() = LayoutUtil::MakeLayout(minor_to_major);
return shape;
}
Shape MakeShapeWithSparseLayout(PrimitiveType element_type,
- tensorflow::gtl::ArraySlice<int64> dimensions,
+ absl::Span<const int64> dimensions,
int64 max_sparse_elements) {
Shape shape = ShapeUtil::MakeShape(element_type, dimensions);
*shape.mutable_layout() = LayoutUtil::MakeSparseLayout(max_sparse_elements);
diff --git a/tensorflow/compiler/xla/literal.cc b/tensorflow/compiler/xla/literal.cc
index 2fc3613650..3f7635bd40 100644
--- a/tensorflow/compiler/xla/literal.cc
+++ b/tensorflow/compiler/xla/literal.cc
@@ -73,7 +73,7 @@ std::ostream& operator<<(std::ostream& out, const Literal& literal) {
MutableLiteralBase::StrideConfig::StrideConfig(
const Shape& source_shape, const Shape& dest_shape,
- tensorflow::gtl::ArraySlice<int64> dimensions)
+ absl::Span<const int64> dimensions)
: dimensions(dimensions),
base(dimensions.size(), 0),
step(dimensions.size(), 1) {
@@ -197,14 +197,13 @@ SparseIndexArray* MutableLiteralBase::sparse_indices(
template <typename NativeT>
Status MutableLiteralBase::CopySliceFromInternal(
- const LiteralBase& src_literal, tensorflow::gtl::ArraySlice<int64> src_base,
- tensorflow::gtl::ArraySlice<int64> dest_base,
- tensorflow::gtl::ArraySlice<int64> copy_size) {
+ const LiteralBase& src_literal, absl::Span<const int64> src_base,
+ absl::Span<const int64> dest_base, absl::Span<const int64> copy_size) {
TF_RET_CHECK(ShapeUtil::Rank(src_literal.shape()) == src_base.size());
TF_RET_CHECK(ShapeUtil::Rank(shape()) == dest_base.size());
auto linear_index = [](const Shape& shape,
- tensorflow::gtl::ArraySlice<int64> multi_index) {
+ absl::Span<const int64> multi_index) {
return IndexUtil::MultidimensionalIndexToLinearIndex(shape, multi_index);
};
@@ -232,7 +231,7 @@ Status MutableLiteralBase::CopySliceFromInternal(
MutableLiteralBase::StrideConfig stride_config(src_literal.shape(), shape(),
copy_size);
- auto copy_proc = [&](tensorflow::gtl::ArraySlice<int64> indexes) {
+ auto copy_proc = [&](absl::Span<const int64> indexes) {
// Map from multi-dimensional index, to source index.
std::transform(indexes.begin(), indexes.end(), src_base.begin(),
src_indexes.begin(), std::plus<int64>());
@@ -257,10 +256,9 @@ Status MutableLiteralBase::CopySliceFromInternal(
return Status::OK();
}
-Status MutableLiteralBase::CopyElementFrom(
- const LiteralSlice& src_literal,
- tensorflow::gtl::ArraySlice<int64> src_index,
- tensorflow::gtl::ArraySlice<int64> dest_index) {
+Status MutableLiteralBase::CopyElementFrom(const LiteralSlice& src_literal,
+ absl::Span<const int64> src_index,
+ absl::Span<const int64> dest_index) {
DCHECK_EQ(shape().element_type(), src_literal.shape().element_type());
const int64 src_linear_index = IndexUtil::MultidimensionalIndexToLinearIndex(
src_literal.shape(), src_index);
@@ -355,9 +353,9 @@ namespace {
// Copies the elements in 'src' to 'dest'. The shape and layout of the data in
// the array slices are indicated by dest_shape and src_shape respectively.
template <typename NativeT>
-void CopyElementsBetween(tensorflow::gtl::MutableArraySlice<NativeT> dest,
- tensorflow::gtl::ArraySlice<NativeT> src,
- const Shape& dest_shape, const Shape& src_shape) {
+void CopyElementsBetween(absl::Span<NativeT> dest,
+ absl::Span<const NativeT> src, const Shape& dest_shape,
+ const Shape& src_shape) {
CHECK(ShapeUtil::Compatible(dest_shape, src_shape));
if (ShapeUtil::IsZeroElementArray(dest_shape)) {
return;
@@ -487,11 +485,10 @@ Status Literal::MoveFrom(Literal&& src_literal,
return Status::OK();
}
-Status MutableLiteralBase::CopySliceFrom(
- const LiteralSlice& src_literal,
- tensorflow::gtl::ArraySlice<int64> src_base,
- tensorflow::gtl::ArraySlice<int64> dest_base,
- tensorflow::gtl::ArraySlice<int64> copy_size) {
+Status MutableLiteralBase::CopySliceFrom(const LiteralSlice& src_literal,
+ absl::Span<const int64> src_base,
+ absl::Span<const int64> dest_base,
+ absl::Span<const int64> copy_size) {
TF_RET_CHECK(ShapeUtil::IsArray(shape())) << ShapeUtil::HumanString(shape());
TF_RET_CHECK(ShapeUtil::IsArray(src_literal.shape()))
<< ShapeUtil::HumanString(src_literal.shape());
@@ -591,8 +588,7 @@ std::unique_ptr<Literal> LiteralBase::Relayout(
}
StatusOr<std::unique_ptr<Literal>> LiteralBase::Broadcast(
- const Shape& result_shape,
- tensorflow::gtl::ArraySlice<int64> dimensions) const {
+ const Shape& result_shape, absl::Span<const int64> dimensions) const {
if (!ShapeUtil::IsArray(shape())) {
return InvalidArgument("Broadcast only supports arrays.");
}
@@ -615,7 +611,7 @@ StatusOr<std::unique_ptr<Literal>> LiteralBase::Broadcast(
ShapeUtil::ByteSizeOfPrimitiveType(shape().element_type());
ShapeUtil::ForEachIndex(
- result_shape, [&](tensorflow::gtl::ArraySlice<int64> output_index) {
+ result_shape, [&](absl::Span<const int64> output_index) {
for (int64 i = 0; i < dimensions.size(); ++i) {
scratch_source_index[i] = output_index[dimensions[i]];
}
@@ -632,7 +628,7 @@ StatusOr<std::unique_ptr<Literal>> LiteralBase::Broadcast(
}
StatusOr<std::unique_ptr<Literal>> LiteralBase::Reshape(
- tensorflow::gtl::ArraySlice<int64> dimensions) const {
+ absl::Span<const int64> dimensions) const {
if (!ShapeUtil::IsArray(shape())) {
return InvalidArgument("Reshape does not support tuples.");
}
@@ -661,7 +657,7 @@ StatusOr<std::unique_ptr<Literal>> LiteralBase::Reshape(
}
std::unique_ptr<Literal> LiteralBase::Transpose(
- tensorflow::gtl::ArraySlice<int64> permutation) const {
+ absl::Span<const int64> permutation) const {
CHECK(ShapeUtil::IsArray(shape())) << "Tuple is not supported for transpose";
CHECK(IsPermutation(permutation, ShapeUtil::Rank(shape())))
<< "Given permutation is not a permutation of dimension numbers";
@@ -700,12 +696,11 @@ std::unique_ptr<Literal> LiteralBase::Transpose(
template <typename NativeT>
std::unique_ptr<Literal> LiteralBase::SliceInternal(
- const Shape& result_shape,
- tensorflow::gtl::ArraySlice<int64> start_indices) const {
+ const Shape& result_shape, absl::Span<const int64> start_indices) const {
auto result_literal = absl::make_unique<Literal>(result_shape);
DimensionVector new_indices(ShapeUtil::Rank(result_shape));
result_literal->EachCell<NativeT>(
- [&](tensorflow::gtl::ArraySlice<int64> indices, NativeT /*value*/) {
+ [&](absl::Span<const int64> indices, NativeT /*value*/) {
for (int64 i = 0; i < ShapeUtil::Rank(result_shape); ++i) {
new_indices[i] = indices[i] + start_indices[i];
}
@@ -716,8 +711,8 @@ std::unique_ptr<Literal> LiteralBase::SliceInternal(
}
std::unique_ptr<Literal> LiteralBase::Slice(
- tensorflow::gtl::ArraySlice<int64> start_indices,
- tensorflow::gtl::ArraySlice<int64> limit_indices) const {
+ absl::Span<const int64> start_indices,
+ absl::Span<const int64> limit_indices) const {
CHECK(ShapeUtil::IsArray(shape())) << "tuple is not supported for slice";
DimensionVector result_dimensions;
@@ -761,7 +756,7 @@ std::unique_ptr<Literal> LiteralBase::CloneToUnique() const {
return result;
}
-string LiteralBase::GetAsString(tensorflow::gtl::ArraySlice<int64> multi_index,
+string LiteralBase::GetAsString(absl::Span<const int64> multi_index,
const ShapeIndex& shape_index) const {
const Shape& subshape = ShapeUtil::GetSubshape(shape(), shape_index);
CHECK(LayoutUtil::IsDenseArray(subshape));
@@ -858,7 +853,7 @@ string LiteralBase::GetSparseElementAsString(
}
StatusOr<int64> LiteralBase::GetIntegralAsS64(
- tensorflow::gtl::ArraySlice<int64> multi_index) const {
+ absl::Span<const int64> multi_index) const {
CHECK(LayoutUtil::IsDenseArray(shape()));
switch (shape().element_type()) {
case PRED:
@@ -900,8 +895,8 @@ size_t LiteralBase::Hash() const {
return hash_value;
}
-Status MutableLiteralBase::SetIntegralAsS64(
- tensorflow::gtl::ArraySlice<int64> multi_index, int64 value) {
+Status MutableLiteralBase::SetIntegralAsS64(absl::Span<const int64> multi_index,
+ int64 value) {
CHECK(LayoutUtil::IsDenseArray(shape()));
switch (shape().element_type()) {
case PRED:
@@ -929,7 +924,7 @@ Status MutableLiteralBase::SetIntegralAsS64(
return Status::OK();
}
-tensorflow::gtl::ArraySlice<int64> LiteralBase::GetSparseIndex(
+absl::Span<const int64> LiteralBase::GetSparseIndex(
int64 sparse_element_number, const ShapeIndex& shape_index) const {
const Piece& p = piece(shape_index);
CHECK_GE(sparse_element_number, 0);
@@ -998,7 +993,7 @@ void LiteralBase::Piece::SortSparseElementsInternal() {
auto values = data<NativeT>();
CHECK_LE(num_elements, values.size());
sparse_indices()->SortWithValues(
- tensorflow::gtl::MutableArraySlice<NativeT>(values.data(), num_elements));
+ absl::Span<NativeT>(values.data(), num_elements));
}
namespace {
@@ -1064,8 +1059,7 @@ void ToStringHelper(const LiteralBase& literal, const ShapeIndex& shape_index,
CHECK(LayoutUtil::IsDenseArray(subshape));
- auto element_to_string =
- [&](tensorflow::gtl::ArraySlice<int64> indices) -> string {
+ auto element_to_string = [&](absl::Span<const int64> indices) -> string {
PrimitiveType element_type = subshape.element_type();
if (element_type == PRED) {
// We display predicates in a densely packed form.
@@ -1160,7 +1154,7 @@ void ToStringHelper(const LiteralBase& literal, const ShapeIndex& shape_index,
pieces->push_back(shape_to_string(subshape));
pieces->push_back(" {");
literal.EachCellAsString(
- [&](tensorflow::gtl::ArraySlice<int64> indices, const string& value) {
+ [&](absl::Span<const int64> indices, const string& value) {
pieces->push_back(" ");
pieces->push_back(value);
});
@@ -1183,7 +1177,7 @@ string LiteralBase::ToString(bool print_layout) const {
}
void LiteralBase::EachCellAsString(
- const std::function<void(tensorflow::gtl::ArraySlice<int64> indices,
+ const std::function<void(absl::Span<const int64> indices,
const string& value)>& per_cell) const {
if (ShapeUtil::IsZeroElementArray(shape())) {
return;
@@ -1250,10 +1244,8 @@ std::unique_ptr<Literal> ConvertToC64(const LiteralBase& src_literal) {
ShapeUtil::ChangeElementType(src_literal.shape(), C64));
using NativeSrcT =
typename primitive_util::PrimitiveTypeToNative<primitive_src_type>::type;
- tensorflow::gtl::ArraySlice<NativeSrcT> src_data =
- src_literal.data<NativeSrcT>();
- tensorflow::gtl::MutableArraySlice<complex64> dest_data =
- result_literal->data<complex64>();
+ absl::Span<const NativeSrcT> src_data = src_literal.data<NativeSrcT>();
+ absl::Span<complex64> dest_data = result_literal->data<complex64>();
int64 num_elements = src_literal.element_count();
for (int64 i = 0; i < num_elements; ++i) {
dest_data[i] = complex64(static_cast<float>(src_data[i]), 0);
@@ -1397,7 +1389,7 @@ StatusOr<std::unique_ptr<Literal>> LiteralBase::ConvertToShape(
}
/* static */ Literal MutableLiteralBase::MoveIntoTuple(
- tensorflow::gtl::MutableArraySlice<Literal> elements) {
+ absl::Span<Literal> elements) {
std::vector<Shape> element_shapes;
for (const Literal& element : elements) {
element_shapes.push_back(element.shape());
@@ -1488,7 +1480,7 @@ bool LiteralBase::operator==(const LiteralBase& other) const {
namespace {
template <typename NativeT>
-static bool AllElementsEqualValue(tensorflow::gtl::ArraySlice<NativeT> data,
+static bool AllElementsEqualValue(absl::Span<const NativeT> data,
NativeT value) {
for (int64 i = 0; i < data.size(); ++i) {
if (data[i] != value) {
@@ -1742,7 +1734,7 @@ bool LiteralBase::IsR1Iota() const {
return true;
}
-bool LiteralBase::IsZero(tensorflow::gtl::ArraySlice<int64> indices) const {
+bool LiteralBase::IsZero(absl::Span<const int64> indices) const {
CHECK(ShapeUtil::IsArray(shape()));
switch (shape().element_type()) {
case U8:
@@ -1778,7 +1770,7 @@ namespace {
template <typename RepeatedFieldT, typename NativeT>
void CopyToRepeatedField(RepeatedFieldT* dest,
- const tensorflow::gtl::ArraySlice<NativeT> src) {
+ const absl::Span<const NativeT> src) {
*dest = RepeatedFieldT(src.begin(), src.end());
}
@@ -1856,7 +1848,7 @@ void* LiteralBase::Piece::untyped_data() {
namespace {
template <typename RepeatedFieldT, typename NativeT>
-Status CopyFromRepeatedField(tensorflow::gtl::MutableArraySlice<NativeT> dest,
+Status CopyFromRepeatedField(absl::Span<NativeT> dest,
const RepeatedFieldT& src) {
if (dest.size() != src.size()) {
return InvalidArgument(
@@ -2126,8 +2118,8 @@ BorrowingLiteral::BorrowingLiteral(const char* src_buf_ptr, const Shape& shape)
root_piece_.set_subshape(shape_.get());
}
-BorrowingLiteral::BorrowingLiteral(
- tensorflow::gtl::ArraySlice<const char*> src_buf_ptrs, const Shape& shape)
+BorrowingLiteral::BorrowingLiteral(absl::Span<const char* const> src_buf_ptrs,
+ const Shape& shape)
: LiteralBase(), shape_(absl::make_unique<Shape>(shape)) {
CHECK(ShapeUtil::IsTuple(*shape_));
CHECK(!ShapeUtil::IsNestedTuple(*shape_));
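The literal.cc hunks above all apply the same mechanical substitution: tensorflow::gtl::ArraySlice<T> becomes absl::Span<const T> for read-only views, and tensorflow::gtl::MutableArraySlice<T> becomes absl::Span<T> for writable views. A minimal standalone sketch of that pattern follows; PrintAll and ZeroFill are illustrative helpers only (not XLA code), and int64_t stands in for XLA's int64 typedef.

    #include <cstdint>
    #include <iostream>
    #include <vector>
    #include "absl/types/span.h"

    // Read-only view: previously tensorflow::gtl::ArraySlice<int64_t>.
    void PrintAll(absl::Span<const int64_t> values) {
      for (int64_t v : values) std::cout << v << " ";
      std::cout << "\n";
    }

    // Writable view: previously tensorflow::gtl::MutableArraySlice<int64_t>.
    void ZeroFill(absl::Span<int64_t> values) {
      for (int64_t& v : values) v = 0;
    }

    int main() {
      std::vector<int64_t> data = {1, 2, 3};
      PrintAll(data);                   // std::vector converts implicitly to Span<const T>.
      ZeroFill(absl::MakeSpan(data));   // MakeSpan yields a mutable view of the same storage.
      PrintAll(data);
    }

Callers rarely need source changes beyond the spelling of the parameter type, which is why most hunks in this commit touch signatures and lambdas but not call sites.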
diff --git a/tensorflow/compiler/xla/literal.h b/tensorflow/compiler/xla/literal.h
index c6ef99db0f..d690241e4e 100644
--- a/tensorflow/compiler/xla/literal.h
+++ b/tensorflow/compiler/xla/literal.h
@@ -70,13 +70,12 @@ class LiteralBase {
// Serialize to proto.
LiteralProto ToProto() const;
- // Returns an ArraySlice of the array for this literal for the given NativeT
+ // Returns a Span of the array for this literal for the given NativeT
// (e.g., float). CHECKs if the subshape of the literal at the given
// ShapeIndex is not array. See primitive_util.h for the mapping from XLA type
// to native type.
template <typename NativeT>
- tensorflow::gtl::ArraySlice<NativeT> data(
- const ShapeIndex& shape_index = {}) const;
+ absl::Span<const NativeT> data(const ShapeIndex& shape_index = {}) const;
// Returns a const pointer to the sparse index array. Returns nullptr if the
// literal is not a sparse array.
@@ -100,12 +99,12 @@ class LiteralBase {
// Gets an element in the literal at the given index. The multi_index is
// CHECKed against the dimension sizes.
template <typename NativeT>
- NativeT Get(tensorflow::gtl::ArraySlice<int64> multi_index,
+ NativeT Get(absl::Span<const int64> multi_index,
const ShapeIndex& shape_index) const;
// Overloads of Get for array literals. CHECKs if the literal is not
// array-shaped and dense.
template <typename NativeT>
- NativeT Get(tensorflow::gtl::ArraySlice<int64> multi_index) const;
+ NativeT Get(absl::Span<const int64> multi_index) const;
// Returns the element value at index (0, ..., 0), however many zeroes are
// required for that index.
@@ -114,7 +113,7 @@ class LiteralBase {
// As Get(), but determines the correct type and converts the value
// into text.
- string GetAsString(tensorflow::gtl::ArraySlice<int64> multi_index,
+ string GetAsString(absl::Span<const int64> multi_index,
const ShapeIndex& shape_index = {}) const;
// As GetSparseElement(), but determines the correct type and converts the
// value into text.
@@ -122,14 +121,13 @@ class LiteralBase {
const ShapeIndex& shape_index = {}) const;
// As Get(), but determines the correct type and converts the value into
// int64. This literal must be an array.
- StatusOr<int64> GetIntegralAsS64(
- tensorflow::gtl::ArraySlice<int64> multi_index) const;
+ StatusOr<int64> GetIntegralAsS64(absl::Span<const int64> multi_index) const;
// Returns the multi-index of the element in a sparse literal at the given
// sparse element number. The sparse element number is the position within
// the sparse array's list of (index, value) pairs, and is checked against the
// total number of (index, value) pairs in the sparse array.
- tensorflow::gtl::ArraySlice<int64> GetSparseIndex(
+ absl::Span<const int64> GetSparseIndex(
int64 sparse_element_number, const ShapeIndex& shape_index = {}) const;
// Returns the value of the element in a sparse literal at the given sparse
@@ -150,12 +148,12 @@ class LiteralBase {
//
// This literal must have a dense layout.
void EachCellAsString(
- const std::function<void(tensorflow::gtl::ArraySlice<int64> indices,
+ const std::function<void(absl::Span<const int64> indices,
const string& value)>& per_cell) const;
template <typename NativeT>
- void EachCell(std::function<void(tensorflow::gtl::ArraySlice<int64> indices,
- NativeT value)>
- per_cell) const;
+ void EachCell(
+ std::function<void(absl::Span<const int64> indices, NativeT value)>
+ per_cell) const;
// Returns whether every element in this literal is equal to value.
//
@@ -200,7 +198,7 @@ class LiteralBase {
// Returns whether this literal is zero at the specified index. This literal
// must be an array with a dense layout.
- bool IsZero(tensorflow::gtl::ArraySlice<int64> indices) const;
+ bool IsZero(absl::Span<const int64> indices) const;
// Returns the count of the elements in the array at the given shape index in
// this literal.
@@ -273,13 +271,12 @@ class LiteralBase {
// implementation currently only supports monotonic dim0-major layouts.
// This literal must be an array.
StatusOr<std::unique_ptr<Literal>> Reshape(
- tensorflow::gtl::ArraySlice<int64> dimensions) const;
+ absl::Span<const int64> dimensions) const;
// Creates a new literal by broadcasting this literal with `dimensions` to
// yield a literal of shape `result_shape`.
StatusOr<std::unique_ptr<Literal>> Broadcast(
- const Shape& result_shape,
- tensorflow::gtl::ArraySlice<int64> dimensions) const;
+ const Shape& result_shape, absl::Span<const int64> dimensions) const;
// Creates a new literal by reordering the dimensions of this literal.
// The given `permutation` must be a permutation of the dimension numbers
@@ -288,8 +285,7 @@ class LiteralBase {
// For example, a transpose call on a literal of shape [3 x 8 x 4] and
// `permutation` = {2, 0, 1} returns a new literal of shape [4 x 3 x 8].
// This literal must be an array.
- std::unique_ptr<Literal> Transpose(
- tensorflow::gtl::ArraySlice<int64> permutation) const;
+ std::unique_ptr<Literal> Transpose(absl::Span<const int64> permutation) const;
// Creates a sub-array from this literal by extracting the indices
// [start_index, limit_index) of each dimension. The result literal has the
@@ -297,9 +293,8 @@ class LiteralBase {
// start_indices and limit_indices must be the rank of the literal, and the
// indices follow the order of the dimensions.
// This literal must be an array.
- std::unique_ptr<Literal> Slice(
- tensorflow::gtl::ArraySlice<int64> start_indices,
- tensorflow::gtl::ArraySlice<int64> limit_indices) const;
+ std::unique_ptr<Literal> Slice(absl::Span<const int64> start_indices,
+ absl::Span<const int64> limit_indices) const;
// Creates a literal with a prepended dimension with bound "times"; e.g. a
// f32[3x2] with times=4 will produce a f32[4x3x2] with the 3x2 from this
@@ -328,9 +323,9 @@ class LiteralBase {
// Returns the buffer holding the array data for this piece as an array
// slice. This piece must be array-shaped.
template <typename NativeT>
- tensorflow::gtl::ArraySlice<NativeT> data() const;
+ absl::Span<const NativeT> data() const;
template <typename NativeT>
- tensorflow::gtl::MutableArraySlice<NativeT> data();
+ absl::Span<NativeT> data();
// Returns the buffer holding the array data for this piece as a void*. This
// piece must be array-shaped.
@@ -341,9 +336,9 @@ class LiteralBase {
// is CHECKed against the dimension sizes of the array. This piece must be
// array-shaped.
template <typename NativeT>
- NativeT Get(tensorflow::gtl::ArraySlice<int64> index) const;
+ NativeT Get(absl::Span<const int64> index) const;
template <typename NativeT>
- void Set(tensorflow::gtl::ArraySlice<int64> index, NativeT value);
+ void Set(absl::Span<const int64> index, NativeT value);
// Gets/sets the buffer holding the array data.
char* buffer() const { return buffer_; }
@@ -545,8 +540,7 @@ class LiteralBase {
private:
template <typename NativeT>
std::unique_ptr<Literal> SliceInternal(
- const Shape& result_shape,
- tensorflow::gtl::ArraySlice<int64> start_indices) const;
+ const Shape& result_shape, absl::Span<const int64> start_indices) const;
};
// Abstract base class representing a mutable literal in XLA.
@@ -554,13 +548,12 @@ class MutableLiteralBase : public LiteralBase {
public:
virtual ~MutableLiteralBase() = 0;
- // Returns a MutableArraySlice view of the array for this literal for the
+ // Returns a Span view of the array for this literal for the
// given NativeT (e.g., float). CHECKs if the subshape of the literal at the
// given ShapeIndex is not array. See primitive_util.h for the mapping from
// XLA type to native type.
template <typename NativeT>
- tensorflow::gtl::MutableArraySlice<NativeT> data(
- const ShapeIndex& shape_index = {});
+ absl::Span<NativeT> data(const ShapeIndex& shape_index = {});
// Unhide const method from parent class.
using LiteralBase::data;
@@ -587,8 +580,7 @@ class MutableLiteralBase : public LiteralBase {
// are populated.
template <typename NativeT>
void PopulateSparse(SparseIndexArray indices,
- tensorflow::gtl::ArraySlice<NativeT> values,
- bool sort = true);
+ absl::Span<const NativeT> values, bool sort = true);
// Copy values from 'src_literal' rooted at 'src_shape_index' into this
// literal rooted at 'dest_shape_index'. The subshape of this literal rooted
@@ -609,39 +601,38 @@ class MutableLiteralBase : public LiteralBase {
// corresponding base indices being 0.
// This literal and 'src_literal' must be arrays.
Status CopySliceFrom(const LiteralSlice& src_literal,
- tensorflow::gtl::ArraySlice<int64> src_base,
- tensorflow::gtl::ArraySlice<int64> dest_base,
- tensorflow::gtl::ArraySlice<int64> copy_size);
+ absl::Span<const int64> src_base,
+ absl::Span<const int64> dest_base,
+ absl::Span<const int64> copy_size);
// Copies one element from src_literal[src_index] to (*this)[dest_index].
Status CopyElementFrom(const LiteralSlice& src_literal,
- tensorflow::gtl::ArraySlice<int64> src_index,
- tensorflow::gtl::ArraySlice<int64> dest_index);
+ absl::Span<const int64> src_index,
+ absl::Span<const int64> dest_index);
// Sets an element in the literal at the given index. The multi_index is
// CHECKed against the dimension sizes.
template <typename NativeT>
- void Set(tensorflow::gtl::ArraySlice<int64> multi_index,
- const ShapeIndex& shape_index, NativeT value);
+ void Set(absl::Span<const int64> multi_index, const ShapeIndex& shape_index,
+ NativeT value);
// Overloads of Set for array literals. CHECKs if the literal is not
// array-shaped and dense.
template <typename NativeT>
- void Set(tensorflow::gtl::ArraySlice<int64> multi_index, NativeT value);
+ void Set(absl::Span<const int64> multi_index, NativeT value);
// Appends the given element to the literal. If the elements are not appended
// in sorted order, then SortSparseElements should be called before calling
// other methods. This literal must have a sparse layout.
template <typename NativeT>
- void AppendSparseElement(tensorflow::gtl::ArraySlice<int64> multi_index,
- NativeT value, const ShapeIndex& shape_index = {});
+ void AppendSparseElement(absl::Span<const int64> multi_index, NativeT value,
+ const ShapeIndex& shape_index = {});
// Sorts the elements in a sparse array.
void SortSparseElements(const ShapeIndex& shape_index = {});
// As Set(), but truncates `value` to the literal element type before storing.
// This literal must be an array.
- Status SetIntegralAsS64(tensorflow::gtl::ArraySlice<int64> multi_index,
- int64 value);
+ Status SetIntegralAsS64(absl::Span<const int64> multi_index, int64 value);
// Populate this literal with the given values. Examples:
//
@@ -656,7 +647,7 @@ class MutableLiteralBase : public LiteralBase {
// example, in the call above to literal.PopulateR2(), 'literal' must be a 2x2
// array of S32.
template <typename NativeT>
- void PopulateR1(tensorflow::gtl::ArraySlice<NativeT> values);
+ void PopulateR1(absl::Span<const NativeT> values);
void PopulateR1(const tensorflow::core::Bitmap& values);
template <typename NativeT>
void PopulateR2(std::initializer_list<std::initializer_list<NativeT>> values);
@@ -673,7 +664,7 @@ class MutableLiteralBase : public LiteralBase {
// in this literal object.
//
// generator must be a callable of the type
- // NativeT(tensorflow::gtl::ArraySlice<int64> indexes) or compatible.
+ // NativeT(absl::Span<const int64> indexes) or compatible.
//
// This literal must have a dense layout.
template <typename NativeT, typename FnType>
@@ -693,8 +684,7 @@ class MutableLiteralBase : public LiteralBase {
// moved into the tuple elements of a new tuple-shaped Literal which is
// returned. Upon return, each of the Literals in 'elements' is set to a nil
// shape (empty tuple).
- static Literal MoveIntoTuple(
- tensorflow::gtl::MutableArraySlice<Literal> elements);
+ static Literal MoveIntoTuple(absl::Span<Literal> elements);
// Serialize from a proto.
static StatusOr<std::unique_ptr<Literal>> CreateFromProto(
@@ -712,20 +702,20 @@ class MutableLiteralBase : public LiteralBase {
// arguments one by one.
template <typename NativeT>
Status CopySliceFromInternal(const LiteralBase& src_literal,
- tensorflow::gtl::ArraySlice<int64> src_base,
- tensorflow::gtl::ArraySlice<int64> dest_base,
- tensorflow::gtl::ArraySlice<int64> copy_size);
+ absl::Span<const int64> src_base,
+ absl::Span<const int64> dest_base,
+ absl::Span<const int64> copy_size);
// Utility structure which is used to create the optimal configuration for
// a ShapeUtil::ForEachIndex() scan across two literals.
struct StrideConfig {
StrideConfig(const Shape& source_shape, const Shape& dest_shape,
- tensorflow::gtl::ArraySlice<int64> dimensions);
+ absl::Span<const int64> dimensions);
// The dimensions of the stride operation. Essentially every dimension
// will be iterated from base[i] to base[i]+dimensions[i], in step[i]
// steps.
- tensorflow::gtl::ArraySlice<int64> dimensions;
+ absl::Span<const int64> dimensions;
DimensionVector base;
DimensionVector step;
int64 minor_dimension = 0;
@@ -854,7 +844,7 @@ class BorrowingLiteral : public LiteralBase {
// This constructor is only used for array shapes.
BorrowingLiteral(const char* src_buf_ptr, const Shape& shape);
// Similar as above, except to be used for constructing non-nested tuples.
- BorrowingLiteral(tensorflow::gtl::ArraySlice<const char*> src_buf_ptrs,
+ BorrowingLiteral(absl::Span<const char* const> src_buf_ptrs,
const Shape& shape);
// TODO(b/79707221): adding constructors for nested tuples as well.
@@ -874,7 +864,7 @@ class BorrowingLiteral : public LiteralBase {
};
template <typename NativeT>
-tensorflow::gtl::ArraySlice<NativeT> LiteralBase::Piece::data() const {
+absl::Span<const NativeT> LiteralBase::Piece::data() const {
CHECK(ShapeUtil::IsArray(subshape())) << ShapeUtil::HumanString(subshape());
CHECK_EQ(subshape().element_type(),
primitive_util::NativeToPrimitiveType<NativeT>())
@@ -882,12 +872,12 @@ tensorflow::gtl::ArraySlice<NativeT> LiteralBase::Piece::data() const {
<< PrimitiveType_Name(primitive_util::NativeToPrimitiveType<NativeT>())
<< " type, but literal element type is "
<< PrimitiveType_Name(subshape().element_type());
- return tensorflow::gtl::ArraySlice<NativeT>(
- reinterpret_cast<const NativeT*>(buffer()), element_count());
+ return absl::Span<const NativeT>(reinterpret_cast<const NativeT*>(buffer()),
+ element_count());
}
template <typename NativeT>
-tensorflow::gtl::MutableArraySlice<NativeT> LiteralBase::Piece::data() {
+absl::Span<NativeT> LiteralBase::Piece::data() {
CHECK(ShapeUtil::IsArray(subshape())) << ShapeUtil::HumanString(subshape());
CHECK_EQ(subshape().element_type(),
primitive_util::NativeToPrimitiveType<NativeT>())
@@ -895,20 +885,19 @@ tensorflow::gtl::MutableArraySlice<NativeT> LiteralBase::Piece::data() {
<< PrimitiveType_Name(primitive_util::NativeToPrimitiveType<NativeT>())
<< " type, but literal element type is "
<< PrimitiveType_Name(subshape().element_type());
- return tensorflow::gtl::MutableArraySlice<NativeT>(
- reinterpret_cast<NativeT*>(buffer()), element_count());
+ return absl::Span<NativeT>(reinterpret_cast<NativeT*>(buffer()),
+ element_count());
}
template <typename NativeT>
-NativeT LiteralBase::Piece::Get(
- tensorflow::gtl::ArraySlice<int64> multi_index) const {
+NativeT LiteralBase::Piece::Get(absl::Span<const int64> multi_index) const {
CHECK(LayoutUtil::IsDenseArray(subshape()));
return data<NativeT>()[IndexUtil::MultidimensionalIndexToLinearIndex(
subshape(), multi_index)];
}
template <typename NativeT>
-void LiteralBase::Piece::Set(tensorflow::gtl::ArraySlice<int64> multi_index,
+void LiteralBase::Piece::Set(absl::Span<const int64> multi_index,
NativeT value) {
CHECK(LayoutUtil::IsDenseArray(subshape()));
data<NativeT>()[IndexUtil::MultidimensionalIndexToLinearIndex(
@@ -916,39 +905,37 @@ void LiteralBase::Piece::Set(tensorflow::gtl::ArraySlice<int64> multi_index,
}
template <typename NativeT>
-tensorflow::gtl::ArraySlice<NativeT> LiteralBase::data(
+absl::Span<const NativeT> LiteralBase::data(
const ShapeIndex& shape_index) const {
return piece(shape_index).data<NativeT>();
}
template <typename NativeT>
-tensorflow::gtl::MutableArraySlice<NativeT> MutableLiteralBase::data(
- const ShapeIndex& shape_index) {
+absl::Span<NativeT> MutableLiteralBase::data(const ShapeIndex& shape_index) {
return piece(shape_index).data<NativeT>();
}
template <typename NativeT>
-inline NativeT LiteralBase::Get(tensorflow::gtl::ArraySlice<int64> multi_index,
+inline NativeT LiteralBase::Get(absl::Span<const int64> multi_index,
const ShapeIndex& shape_index) const {
return piece(shape_index).Get<NativeT>(multi_index);
}
template <typename NativeT>
-inline NativeT LiteralBase::Get(
- tensorflow::gtl::ArraySlice<int64> multi_index) const {
+inline NativeT LiteralBase::Get(absl::Span<const int64> multi_index) const {
return root_piece().Get<NativeT>(multi_index);
}
template <typename NativeT>
-inline void MutableLiteralBase::Set(
- tensorflow::gtl::ArraySlice<int64> multi_index,
- const ShapeIndex& shape_index, NativeT value) {
+inline void MutableLiteralBase::Set(absl::Span<const int64> multi_index,
+ const ShapeIndex& shape_index,
+ NativeT value) {
return piece(shape_index).Set<NativeT>(multi_index, value);
}
template <typename NativeT>
-inline void MutableLiteralBase::Set(
- tensorflow::gtl::ArraySlice<int64> multi_index, NativeT value) {
+inline void MutableLiteralBase::Set(absl::Span<const int64> multi_index,
+ NativeT value) {
return root_piece().Set<NativeT>(multi_index, value);
}
@@ -967,7 +954,7 @@ NativeT LiteralBase::GetSparseElement(int64 sparse_element_number,
template <typename NativeT>
void MutableLiteralBase::AppendSparseElement(
- tensorflow::gtl::ArraySlice<int64> multi_index, NativeT value,
+ absl::Span<const int64> multi_index, NativeT value,
const ShapeIndex& shape_index) {
Piece& p = piece(shape_index);
const Shape& subshape = p.subshape();
@@ -983,8 +970,7 @@ void MutableLiteralBase::AppendSparseElement(
template <typename NativeT>
void LiteralBase::EachCell(
- std::function<void(tensorflow::gtl::ArraySlice<int64> indices,
- NativeT value)>
+ std::function<void(absl::Span<const int64> indices, NativeT value)>
per_cell) const {
if (ShapeUtil::IsZeroElementArray(shape())) {
return;
@@ -996,8 +982,7 @@ void LiteralBase::EachCell(
}
template <typename NativeT>
-inline void MutableLiteralBase::PopulateR1(
- tensorflow::gtl::ArraySlice<NativeT> values) {
+inline void MutableLiteralBase::PopulateR1(absl::Span<const NativeT> values) {
CHECK(ShapeUtil::IsArray(shape()));
CHECK_EQ(ShapeUtil::Rank(shape()), 1);
CHECK_EQ(ShapeUtil::ElementsIn(shape()), values.size());
@@ -1042,8 +1027,9 @@ void MutableLiteralBase::PopulateFromArray(const Array<NativeT>& values) {
for (int dim = 0; dim < values.num_dimensions(); ++dim) {
CHECK_EQ(values.dim(dim), shape().dimensions(dim));
}
- values.Each([this](tensorflow::gtl::ArraySlice<int64> indices,
- NativeT value) { this->Set(indices, value); });
+ values.Each([this](absl::Span<const int64> indices, NativeT value) {
+ this->Set(indices, value);
+ });
}
template <typename NativeT>
@@ -1062,9 +1048,9 @@ void MutableLiteralBase::PopulateR4FromArray4D(const Array4D<NativeT>& values) {
}
template <typename NativeT>
-void MutableLiteralBase::PopulateSparse(
- SparseIndexArray indices, tensorflow::gtl::ArraySlice<NativeT> values,
- bool sort) {
+void MutableLiteralBase::PopulateSparse(SparseIndexArray indices,
+ absl::Span<const NativeT> values,
+ bool sort) {
CHECK(LayoutUtil::IsSparseArray(shape()));
int rank = ShapeUtil::Rank(shape());
CHECK_EQ(indices.rank(), rank);
@@ -1074,7 +1060,7 @@ void MutableLiteralBase::PopulateSparse(
CHECK_LE(num_elements, max_elements);
CHECK_EQ(num_elements, indices.index_count());
auto root_data = root_piece().data<NativeT>();
- // Piece::data() returns an ArraySlice of size equal to the number of indices
+ // Piece::data() returns a Span of size equal to the number of indices
// in the SparseIndexArray. So there is no need to adjust the size of the data
// here. It is enough to just copy the incoming values into the data buffer.
std::copy(values.begin(), values.end(), root_data.begin());
@@ -1094,14 +1080,14 @@ Status MutableLiteralBase::PopulateInternal(const FnType& generator,
TF_RET_CHECK(LayoutUtil::IsDenseArray(this_shape));
TF_RET_CHECK(this_shape.element_type() ==
primitive_util::NativeToPrimitiveType<NativeT>());
- tensorflow::gtl::MutableArraySlice<NativeT> literal_data = data<NativeT>();
+ absl::Span<NativeT> literal_data = data<NativeT>();
if (rank > 0) {
StrideConfig stride_config(this_shape, this_shape,
AsInt64Slice(this_shape.dimensions()));
int64 minor_dimension_size =
ShapeUtil::GetDimension(this_shape, stride_config.minor_dimension);
- auto init_function = [&](tensorflow::gtl::ArraySlice<int64> indexes) {
+ auto init_function = [&](absl::Span<const int64> indexes) {
DimensionVector minor_scan_indexes(rank, 0);
const int64 index =
IndexUtil::MultidimensionalIndexToLinearIndex(shape(), indexes);
@@ -1119,7 +1105,7 @@ Status MutableLiteralBase::PopulateInternal(const FnType& generator,
ShapeUtil::ForEachIndex(
this_shape, stride_config.base, stride_config.dimensions,
stride_config.step,
- [&init_function](tensorflow::gtl::ArraySlice<int64> indexes) {
+ [&init_function](absl::Span<const int64> indexes) {
init_function(indexes);
return true;
});
@@ -1165,7 +1151,7 @@ std::unique_ptr<Literal> LiteralBase::Replicate(int64 times) const {
}
DimensionVector output_indices(bounds.size(), 0);
- tensorflow::gtl::ArraySlice<int64> input_indices = output_indices;
+ absl::Span<const int64> input_indices = output_indices;
input_indices.remove_prefix(1);
bool done = false;
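The Replicate() hunk just above relies on absl::Span keeping the ArraySlice-style remove_prefix() member, so an index vector can be re-viewed with its leading (replication) dimension dropped without copying. A minimal sketch of that idiom, independent of XLA (int64_t stands in for XLA's int64):

    #include <cstdint>
    #include <vector>
    #include "absl/types/span.h"

    int main() {
      // output_indices covers every dimension of the replicated literal.
      std::vector<int64_t> output_indices = {0, 7, 8, 9};

      // input_indices aliases the same storage, minus the prepended dimension.
      absl::Span<const int64_t> input_indices = output_indices;
      input_indices.remove_prefix(1);   // now views {7, 8, 9}

      // A Span is a non-owning view, so updates to output_indices remain
      // visible through input_indices.
      output_indices[1] = 42;           // input_indices[0] == 42
      return 0;
    }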
diff --git a/tensorflow/compiler/xla/literal_comparison.cc b/tensorflow/compiler/xla/literal_comparison.cc
index f6ce69eaee..3d8725ed70 100644
--- a/tensorflow/compiler/xla/literal_comparison.cc
+++ b/tensorflow/compiler/xla/literal_comparison.cc
@@ -38,8 +38,8 @@ namespace {
// between the left-hand-side and right-hand-side, by bit-casting to UnsignedT
// -- on miscompare, a nice error message is given in the AssertionFailure.
template <typename FloatT, typename UnsignedT>
-Status CompareFloatsBitwiseEqual(
- FloatT lhs, FloatT rhs, tensorflow::gtl::ArraySlice<int64> multi_index) {
+Status CompareFloatsBitwiseEqual(FloatT lhs, FloatT rhs,
+ absl::Span<const int64> multi_index) {
auto ulhs = tensorflow::bit_cast<UnsignedT>(lhs);
auto urhs = tensorflow::bit_cast<UnsignedT>(rhs);
auto lhs_double = static_cast<double>(lhs);
@@ -60,7 +60,7 @@ Status CompareFloatsBitwiseEqual(
// default gunit implementation).
template <typename NativeT>
Status CompareEqual(NativeT lhs, NativeT rhs,
- tensorflow::gtl::ArraySlice<int64> multi_index) {
+ absl::Span<const int64> multi_index) {
if (lhs == rhs) {
return Status::OK();
}
@@ -74,28 +74,27 @@ Status CompareEqual(NativeT lhs, NativeT rhs,
// comparison is requested.
template <>
Status CompareEqual<bfloat16>(bfloat16 lhs, bfloat16 rhs,
- tensorflow::gtl::ArraySlice<int64> multi_index) {
+ absl::Span<const int64> multi_index) {
return CompareFloatsBitwiseEqual<bfloat16, uint16>(lhs, rhs, multi_index);
}
template <>
-Status CompareEqual<Eigen::half>(
- Eigen::half lhs, Eigen::half rhs,
- tensorflow::gtl::ArraySlice<int64> multi_index) {
+Status CompareEqual<Eigen::half>(Eigen::half lhs, Eigen::half rhs,
+ absl::Span<const int64> multi_index) {
return CompareFloatsBitwiseEqual<Eigen::half, uint16>(lhs, rhs, multi_index);
}
template <>
Status CompareEqual<float>(float lhs, float rhs,
- tensorflow::gtl::ArraySlice<int64> multi_index) {
+ absl::Span<const int64> multi_index) {
return CompareFloatsBitwiseEqual<float, uint32>(lhs, rhs, multi_index);
}
template <>
Status CompareEqual<double>(double lhs, double rhs,
- tensorflow::gtl::ArraySlice<int64> multi_index) {
+ absl::Span<const int64> multi_index) {
return CompareFloatsBitwiseEqual<double, uint64>(lhs, rhs, multi_index);
}
template <>
Status CompareEqual<complex64>(complex64 lhs, complex64 rhs,
- tensorflow::gtl::ArraySlice<int64> multi_index) {
+ absl::Span<const int64> multi_index) {
auto res = CompareEqual<float>(lhs.real(), rhs.real(), multi_index);
if (!res.ok()) {
return res;
@@ -108,8 +107,7 @@ Status CompareEqual<complex64>(complex64 lhs, complex64 rhs,
// elements are equal.
template <typename NativeT>
Status Equal(LiteralSlice expected, LiteralSlice actual,
- tensorflow::gtl::MutableArraySlice<int64> multi_index,
- int64 dimension) {
+ absl::Span<int64> multi_index, int64 dimension) {
if (dimension == expected.shape().dimensions_size()) {
NativeT expected_value = expected.Get<NativeT>(multi_index);
NativeT actual_value = actual.Get<NativeT>(multi_index);
@@ -305,8 +303,7 @@ class NearComparator {
}
// Insert the given error into the given error bucket vector.
- void UpdateErrorBucket(
- float error, tensorflow::gtl::MutableArraySlice<int64> error_buckets) {
+ void UpdateErrorBucket(float error, absl::Span<int64> error_buckets) {
CHECK_EQ(error_buckets.size(), kErrorBucketBounds.size());
for (int i = 0; i < error_buckets.size(); ++i) {
if (error >= kErrorBucketBounds[i]) {
@@ -410,10 +407,8 @@ class NearComparator {
// Fast path optimization for the case where layouts match.
if (LayoutUtil::Equal(actual_.shape().layout(),
expected_.shape().layout())) {
- tensorflow::gtl::ArraySlice<const NativeT> expected_data =
- expected_.data<NativeT>();
- tensorflow::gtl::ArraySlice<const NativeT> actual_data =
- actual_.data<NativeT>();
+ absl::Span<const NativeT> expected_data = expected_.data<NativeT>();
+ absl::Span<const NativeT> actual_data = actual_.data<NativeT>();
const int64 len = expected_data.size();
for (int64 i = 0; i < len; ++i) {
CompareValues(expected_data[i], actual_data[i], i);
@@ -488,7 +483,7 @@ class NearComparator {
}
auto print_accum_buckets = [&](const string& header, int64 total,
- tensorflow::gtl::ArraySlice<int64> buckets) {
+ absl::Span<const int64> buckets) {
StrAppend(&out, header, ":\n");
StrAppendFormat(&out, " < %-6g : %7d (%s)\n", kErrorBucketBounds[0],
total - buckets[0],
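In the NearComparator fast path shown above, both operands become absl::Span<const NativeT> and are walked linearly once the layouts match. A hedged, self-contained sketch of that element-wise pass; CountMismatches is an illustrative helper, not the XLA comparator:

    #include <cstdint>
    #include <vector>
    #include "absl/types/span.h"

    // Counts positions where two equally sized views disagree.
    template <typename NativeT>
    int64_t CountMismatches(absl::Span<const NativeT> expected,
                            absl::Span<const NativeT> actual) {
      int64_t mismatches = 0;
      const int64_t len = expected.size();
      for (int64_t i = 0; i < len; ++i) {
        if (expected[i] != actual[i]) ++mismatches;
      }
      return mismatches;
    }

    int main() {
      std::vector<float> a = {1.f, 2.f, 3.f};
      std::vector<float> b = {1.f, 2.5f, 3.f};
      // The template argument is spelled out because the vector-to-Span
      // conversion is implicit rather than deduced.
      return static_cast<int>(CountMismatches<float>(a, b));  // 1
    }

Note the hunk also drops the doubled const of the old ArraySlice<const NativeT> spelling; with Span, constness lives in the element type exactly once.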
diff --git a/tensorflow/compiler/xla/literal_test.cc b/tensorflow/compiler/xla/literal_test.cc
index 5d7d4dbb36..922196b1fd 100644
--- a/tensorflow/compiler/xla/literal_test.cc
+++ b/tensorflow/compiler/xla/literal_test.cc
@@ -36,7 +36,6 @@ limitations under the License.
namespace xla {
namespace {
-using tensorflow::gtl::ArraySlice;
using ::testing::ElementsAre;
using ::testing::HasSubstr;
@@ -222,9 +221,9 @@ TEST_F(LiteralUtilTest, CreateSparse) {
std::vector<int64> expected_values = {8, 9, 7, 10};
EXPECT_EQ(literal->sparse_indices()->data(),
- ArraySlice<int64>(expected_indices.data(),
- expected_indices.num_elements()));
- EXPECT_EQ(literal->data<int64>(), ArraySlice<int64>(expected_values));
+ absl::Span<const int64>(expected_indices.data(),
+ expected_indices.num_elements()));
+ EXPECT_EQ(literal->data<int64>(), absl::Span<const int64>(expected_values));
}
TEST_F(LiteralUtilTest, LiteralR4F32ProjectedStringifies) {
@@ -296,7 +295,7 @@ TEST_F(LiteralUtilTest, EachCellR2F32) {
// clang-format on
std::vector<std::tuple<int64, int64, string>> seen;
literal->EachCellAsString(
- [&seen](ArraySlice<int64> indices, const string& value) {
+ [&seen](absl::Span<const int64> indices, const string& value) {
seen.emplace_back(indices[0], indices[1], value);
});
@@ -649,7 +648,7 @@ TEST_F(LiteralUtilTest, TransposeR4) {
// clang-format on
auto reshape = original->Transpose(/*permutation=*/{2, 3, 0, 1});
- reshape->EachCell<float>([&](ArraySlice<int64> indices, float value) {
+ reshape->EachCell<float>([&](absl::Span<const int64> indices, float value) {
EXPECT_EQ(value, original->Get<float>(
{indices[2], indices[3], indices[0], indices[1]}));
});
@@ -889,7 +888,7 @@ TEST_F(LiteralUtilTest, CopySliceFrom) {
const int64 zero_base[] = {0, 0, 0, 0};
const int64 step[] = {1, 1, 1, 1};
uint32 seqnr = 0;
- auto init_proc = [&](ArraySlice<int64> indexes) {
+ auto init_proc = [&](absl::Span<const int64> indexes) {
source->Set(indexes, ++seqnr);
return true;
};
@@ -905,7 +904,7 @@ TEST_F(LiteralUtilTest, CopySliceFrom) {
std::vector<int64> source_indexes(TF_ARRAYSIZE(dimensions), 0);
std::vector<int64> blank_indexes(TF_ARRAYSIZE(dimensions), 0);
bool matched = true;
- auto check_proc = [&](ArraySlice<int64> indexes) {
+ auto check_proc = [&](absl::Span<const int64> indexes) {
std::copy(indexes.begin(), indexes.end(), source_indexes.begin());
std::transform(source_indexes.begin(), source_indexes.end(), src_base,
source_indexes.begin(), std::plus<int64>());
@@ -1093,7 +1092,7 @@ TEST_F(LiteralUtilTest, Populate) {
primitive_util::NativeToPrimitiveType<uint32>(), data.dimensions,
data.layout);
auto literal = absl::make_unique<Literal>(shape);
- auto generator = [&](ArraySlice<int64> indexes) -> uint32 {
+ auto generator = [&](absl::Span<const int64> indexes) -> uint32 {
// Offsets from linear index just to avoid R0 literals to be initialized
// with zero.
return IndexUtil::MultidimensionalIndexToLinearIndex(literal->shape(),
@@ -1105,7 +1104,7 @@ TEST_F(LiteralUtilTest, Populate) {
std::vector<int64> zero_base(data.dimensions.size(), 0);
std::vector<int64> step(data.dimensions.size(), 1);
bool matched = true;
- auto check_function = [&](ArraySlice<int64> indexes) {
+ auto check_function = [&](absl::Span<const int64> indexes) {
auto value = literal->Get<uint32>(indexes);
matched = matched && (value == generator(indexes));
return matched;
@@ -1135,7 +1134,7 @@ TEST_F(LiteralUtilTest, PopulateParallel) {
primitive_util::NativeToPrimitiveType<uint32>(), data.dimensions,
data.layout);
auto literal = absl::make_unique<Literal>(shape);
- auto generator = [&](ArraySlice<int64> indexes) -> uint32 {
+ auto generator = [&](absl::Span<const int64> indexes) -> uint32 {
// Offsets from linear index just to avoid R0 literals to be initialized
// with zero.
return IndexUtil::MultidimensionalIndexToLinearIndex(literal->shape(),
@@ -1147,7 +1146,7 @@ TEST_F(LiteralUtilTest, PopulateParallel) {
std::vector<int64> zero_base(data.dimensions.size(), 0);
std::vector<int64> step(data.dimensions.size(), 1);
bool matched = true;
- auto check_function = [&](ArraySlice<int64> indexes) {
+ auto check_function = [&](absl::Span<const int64> indexes) {
auto value = literal->Get<uint32>(indexes);
matched = matched && (value == generator(indexes));
return matched;
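The test updates above lean on the two Span constructors the rename exposes: (pointer, length) for the sparse index buffer and implicit construction from a whole container for the expected values. A small sketch outside the test fixture, with plain std::vector standing in for the sparse-index storage and int64_t for XLA's int64:

    #include <cstdint>
    #include <vector>
    #include "absl/types/span.h"

    int main() {
      std::vector<int64_t> indices = {0, 1, 2, 3, 4, 5};
      std::vector<int64_t> values  = {8, 9, 7};

      // Pointer + length, as used for the sparse index buffer.
      absl::Span<const int64_t> index_view(indices.data(), indices.size());

      // Whole-container view, as used for the expected values.
      absl::Span<const int64_t> value_view(values);

      // Spans compare element-wise, which is what the EXPECT_EQ calls rely on.
      bool indices_match = (index_view == absl::Span<const int64_t>(indices));
      bool values_match  = (value_view == absl::MakeConstSpan(values));
      return (indices_match && values_match) ? 0 : 1;
    }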
diff --git a/tensorflow/compiler/xla/literal_util.cc b/tensorflow/compiler/xla/literal_util.cc
index 931d2c631b..613449cf10 100644
--- a/tensorflow/compiler/xla/literal_util.cc
+++ b/tensorflow/compiler/xla/literal_util.cc
@@ -84,8 +84,7 @@ std::unique_ptr<Literal> ConvertType(LiteralSlice literal) {
} // namespace
/* static */ std::unique_ptr<Literal> LiteralUtil::CreateFromDimensions(
- PrimitiveType primitive_type,
- tensorflow::gtl::ArraySlice<int64> dimensions) {
+ PrimitiveType primitive_type, absl::Span<const int64> dimensions) {
return Literal::CreateFromShape(
ShapeUtil::MakeShape(primitive_type, dimensions));
}
@@ -301,9 +300,8 @@ std::unique_ptr<Literal> ConvertType(LiteralSlice literal) {
}
/* static */ std::unique_ptr<Literal> LiteralUtil::ReshapeSlice(
- tensorflow::gtl::ArraySlice<int64> new_dimensions,
- tensorflow::gtl::ArraySlice<int64> minor_to_major,
- const LiteralSlice& literal) {
+ absl::Span<const int64> new_dimensions,
+ absl::Span<const int64> minor_to_major, const LiteralSlice& literal) {
int64 new_num_elements = 1;
for (int64 i = 0; i < new_dimensions.size(); ++i) {
new_num_elements *= new_dimensions[i];
@@ -430,7 +428,7 @@ std::unique_ptr<Literal> ConvertType(LiteralSlice literal) {
}
/* static */ std::unique_ptr<Literal> LiteralUtil::MakeTuple(
- tensorflow::gtl::ArraySlice<const Literal*> elements) {
+ absl::Span<const Literal* const> elements) {
std::vector<Shape> element_shapes;
for (const auto* element : elements) {
element_shapes.push_back(element->shape());
@@ -444,7 +442,7 @@ std::unique_ptr<Literal> ConvertType(LiteralSlice literal) {
}
/* static */ std::unique_ptr<Literal> LiteralUtil::MakeTupleFromSlices(
- tensorflow::gtl::ArraySlice<LiteralSlice> elements) {
+ absl::Span<const LiteralSlice> elements) {
std::vector<Shape> element_shapes;
for (const auto& element : elements) {
element_shapes.push_back(element.shape());
@@ -474,7 +472,7 @@ std::unique_ptr<Literal> ConvertType(LiteralSlice literal) {
}
/* static */ string LiteralUtil::MultiIndexAsString(
- tensorflow::gtl::ArraySlice<int64> multi_index) {
+ absl::Span<const int64> multi_index) {
return StrCat("{", absl::StrJoin(multi_index, ","), "}");
}
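MultiIndexAsString now joins an absl::Span<const int64> directly; absl::StrJoin accepts any iterable range, so the view never has to be copied into a vector first. A hedged standalone equivalent (a free function mirroring the member's shape, not the XLA declaration itself):

    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <vector>
    #include "absl/strings/str_cat.h"
    #include "absl/strings/str_join.h"
    #include "absl/types/span.h"

    // Mirrors LiteralUtil::MultiIndexAsString, outside the class.
    std::string MultiIndexAsString(absl::Span<const int64_t> multi_index) {
      return absl::StrCat("{", absl::StrJoin(multi_index, ","), "}");
    }

    int main() {
      std::vector<int64_t> index = {7, 8};
      std::cout << MultiIndexAsString(index) << "\n";  // prints {7,8}
    }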
diff --git a/tensorflow/compiler/xla/literal_util.h b/tensorflow/compiler/xla/literal_util.h
index 3d28c070f2..fa336ae0de 100644
--- a/tensorflow/compiler/xla/literal_util.h
+++ b/tensorflow/compiler/xla/literal_util.h
@@ -71,8 +71,7 @@ class LiteralUtil {
template <typename NativeT>
static std::unique_ptr<Literal> CreateR0(NativeT value);
template <typename NativeT>
- static std::unique_ptr<Literal> CreateR1(
- tensorflow::gtl::ArraySlice<NativeT> values);
+ static std::unique_ptr<Literal> CreateR1(absl::Span<const NativeT> values);
static std::unique_ptr<Literal> CreateR1(
const tensorflow::core::Bitmap& values);
template <typename NativeT>
@@ -141,8 +140,8 @@ class LiteralUtil {
//
template <typename NativeT>
static std::unique_ptr<Literal> CreateSparse(
- tensorflow::gtl::ArraySlice<int64> dimensions, SparseIndexArray indices,
- tensorflow::gtl::ArraySlice<NativeT> values, bool sort = true);
+ absl::Span<const int64> dimensions, SparseIndexArray indices,
+ absl::Span<const NativeT> values, bool sort = true);
// Creates a scalar literal value zero of the given primitive type.
static Literal Zero(PrimitiveType primitive_type);
@@ -157,7 +156,7 @@ class LiteralUtil {
// Creates a literal of the given shape where each element is `value`.
template <typename NativeT>
static std::unique_ptr<Literal> CreateFullWithDescendingLayout(
- tensorflow::gtl::ArraySlice<int64> dimensions, NativeT value);
+ absl::Span<const int64> dimensions, NativeT value);
// Creates a new literal from an Array type. The variants not ending with
// WithLayout use the default XLA layout for the literal's linear
@@ -215,10 +214,10 @@ class LiteralUtil {
// Returns a tuple literal composed of given literals. Data is copied from the
// given elements into the returned literal.
static std::unique_ptr<Literal> MakeTuple(
- tensorflow::gtl::ArraySlice<const Literal*> elements);
+ absl::Span<const Literal* const> elements);
static std::unique_ptr<Literal> MakeTupleFromSlices(
- tensorflow::gtl::ArraySlice<LiteralSlice> elements);
+ absl::Span<const LiteralSlice> elements);
// As above, but intended to be invoked with move semantics; i.e.
//
@@ -259,8 +258,7 @@ class LiteralUtil {
// The content of the literal values is the default value of the primitive
// type of literal itself (0 for numeric types, and false for predicates).
static std::unique_ptr<Literal> CreateFromDimensions(
- PrimitiveType primitive_type,
- tensorflow::gtl::ArraySlice<int64> dimensions);
+ PrimitiveType primitive_type, absl::Span<const int64> dimensions);
// If the given literal's data type is bfloat16, converts it to a float
// literal; otherwise, returns a copy of it. If the literal is a tuple,
@@ -279,9 +277,8 @@ class LiteralUtil {
// buffer of the input literal is assumed to have the given minor_to_major
// layout order.
static std::unique_ptr<Literal> ReshapeSlice(
- tensorflow::gtl::ArraySlice<int64> new_dimensions,
- tensorflow::gtl::ArraySlice<int64> minor_to_major,
- const LiteralSlice& literal);
+ absl::Span<const int64> new_dimensions,
+ absl::Span<const int64> minor_to_major, const LiteralSlice& literal);
// Creates a literal with the supplied shape, and uses the provided value
// generator to populate the literal's values.
@@ -291,7 +288,7 @@ class LiteralUtil {
typename T = typename primitive_util::PrimitiveTypeToNative<type>::type>
static StatusOr<std::unique_ptr<Literal>> CreateRandomLiteral(
const Shape& shape,
- const std::function<T(tensorflow::gtl::ArraySlice<int64>)>& generator);
+ const std::function<T(absl::Span<const int64>)>& generator);
// Creates a literal with the supplied shape, and initializes the literal
// values using a normal distribution with given mean and stddev standard
@@ -319,8 +316,7 @@ class LiteralUtil {
// Returns a multi-dimensional index as a string. For example: '{7, 8}' will
// be returned for a 2-dimensional index with dimension 0 index equal to 7,
// dimension 1 equal to 8.
- static string MultiIndexAsString(
- tensorflow::gtl::ArraySlice<int64> multi_index);
+ static string MultiIndexAsString(absl::Span<const int64> multi_index);
};
std::ostream& operator<<(std::ostream& out, const Literal& literal);
@@ -335,7 +331,7 @@ template <typename NativeT>
template <typename NativeT>
/* static */ std::unique_ptr<Literal> LiteralUtil::CreateR1(
- tensorflow::gtl::ArraySlice<NativeT> values) {
+ absl::Span<const NativeT> values) {
auto literal = absl::make_unique<Literal>(
ShapeUtil::MakeShape(primitive_util::NativeToPrimitiveType<NativeT>(),
{static_cast<int64>(values.size())}));
@@ -427,8 +423,8 @@ template <typename NativeT>
template <typename NativeT>
/* static */ std::unique_ptr<Literal> LiteralUtil::CreateSparse(
- tensorflow::gtl::ArraySlice<int64> dimensions, SparseIndexArray indices,
- tensorflow::gtl::ArraySlice<NativeT> values, bool sort) {
+ absl::Span<const int64> dimensions, SparseIndexArray indices,
+ absl::Span<const NativeT> values, bool sort) {
int64 num_elements = values.size();
int64 rank = dimensions.size();
CHECK_EQ(num_elements, indices.index_count());
@@ -570,8 +566,8 @@ template <typename NativeT>
template <typename NativeT>
/* static */ std::unique_ptr<Literal>
-LiteralUtil::CreateFullWithDescendingLayout(
- tensorflow::gtl::ArraySlice<int64> dimensions, NativeT value) {
+LiteralUtil::CreateFullWithDescendingLayout(absl::Span<const int64> dimensions,
+ NativeT value) {
auto literal =
absl::make_unique<Literal>(ShapeUtil::MakeShapeWithDescendingLayout(
primitive_util::NativeToPrimitiveType<NativeT>(), dimensions));
@@ -583,14 +579,12 @@ template <PrimitiveType type, typename T>
/* static */ StatusOr<std::unique_ptr<Literal>>
LiteralUtil::CreateRandomLiteral(
const Shape& shape,
- const std::function<T(tensorflow::gtl::ArraySlice<int64>)>& generator) {
+ const std::function<T(absl::Span<const int64>)>& generator) {
using NativeT = typename primitive_util::PrimitiveTypeToNative<type>::type;
TF_RET_CHECK(shape.element_type() == type);
auto literal = absl::make_unique<Literal>(shape);
TF_RETURN_IF_ERROR(literal.get()->Populate<NativeT>(
- [&](tensorflow::gtl::ArraySlice<int64> indexes) {
- return generator(indexes);
- }));
+ [&](absl::Span<const int64> indexes) { return generator(indexes); }));
return std::move(literal);
}
@@ -601,9 +595,8 @@ LiteralUtil::CreateRandomLiteral(const Shape& shape, E* engine, T mean,
using NativeT = typename primitive_util::PrimitiveTypeToNative<type>::type;
std::normal_distribution<NativeT> generator(mean, stddev);
return CreateRandomLiteral<type, NativeT>(
- shape, [&](tensorflow::gtl::ArraySlice<int64> /*indexes*/) {
- return generator(*engine);
- });
+ shape,
+ [&](absl::Span<const int64> /*indexes*/) { return generator(*engine); });
}
template <PrimitiveType type, typename T>
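The literal_util.h hunks change the generator callback type to std::function<T(absl::Span<const int64>)>. Any lambda whose parameter is absl::Span<const int64> still binds to it, and containers convert implicitly at the call. A minimal sketch of that binding; MakeWithGenerator is a hypothetical stand-in for the Populate/CreateRandomLiteral entry points, and int64_t stands in for int64:

    #include <cstdint>
    #include <functional>
    #include <numeric>
    #include <vector>
    #include "absl/types/span.h"

    // Illustrative stand-in: calls the generator once per (fake) index vector
    // and collects the results.
    std::vector<uint32_t> MakeWithGenerator(
        int64_t n,
        const std::function<uint32_t(absl::Span<const int64_t>)>& generator) {
      std::vector<uint32_t> out;
      for (int64_t i = 0; i < n; ++i) {
        std::vector<int64_t> index = {i};
        out.push_back(generator(index));  // vector converts to Span<const int64_t>
      }
      return out;
    }

    int main() {
      // The lambda's parameter type matches the new callback signature.
      auto values = MakeWithGenerator(4, [](absl::Span<const int64_t> indexes) {
        return static_cast<uint32_t>(
            std::accumulate(indexes.begin(), indexes.end(), int64_t{17}));
      });
      return static_cast<int>(values.size());  // 4
    }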
diff --git a/tensorflow/compiler/xla/packed_literal_reader.cc b/tensorflow/compiler/xla/packed_literal_reader.cc
index 6e42775f6f..83429b8fd3 100644
--- a/tensorflow/compiler/xla/packed_literal_reader.cc
+++ b/tensorflow/compiler/xla/packed_literal_reader.cc
@@ -61,7 +61,7 @@ StatusOr<std::unique_ptr<Literal>> PackedLiteralReader::Read(
result->PopulateWithValue(std::numeric_limits<float>::quiet_NaN());
int64 elements = ShapeUtil::ElementsIn(shape);
- tensorflow::gtl::ArraySlice<float> field = result->data<float>();
+ absl::Span<const float> field = result->data<float>();
char* data = tensorflow::bit_cast<char*>(field.data());
uint64 bytes = elements * sizeof(float);
tensorflow::StringPiece sp; // non-absl OK
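packed_literal_reader.cc keeps treating the literal's storage as raw bytes through the view's data() pointer and element count. A hedged sketch of the same idea on a plain vector, with std::memcpy standing in for the TensorFlow random-access file read:

    #include <cstdint>
    #include <cstring>
    #include <vector>
    #include "absl/types/span.h"

    int main() {
      // Destination buffer, viewed mutably so data() yields a float*.
      std::vector<float> storage(4, 0.0f);
      absl::Span<float> field = absl::MakeSpan(storage);

      // Bytes "read from disk" -- here just a second buffer.
      const std::vector<float> on_disk = {1.0f, 2.0f, 3.0f, 4.0f};
      const uint64_t bytes = field.size() * sizeof(float);

      // Fill the literal-like buffer through its raw byte view.
      std::memcpy(field.data(), on_disk.data(), bytes);
      return field[2] == 3.0f ? 0 : 1;
    }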
diff --git a/tensorflow/compiler/xla/python/local_computation_builder.cc b/tensorflow/compiler/xla/python/local_computation_builder.cc
index b5fd747cfa..cd6e20b693 100644
--- a/tensorflow/compiler/xla/python/local_computation_builder.cc
+++ b/tensorflow/compiler/xla/python/local_computation_builder.cc
@@ -259,7 +259,7 @@ StatusOr<std::unique_ptr<Literal>> CompiledLocalComputation::Execute(
}
LocalShapedBuffer* CompiledLocalComputation::ExecuteWithShapedBuffers(
- tensorflow::gtl::ArraySlice<LocalShapedBuffer*> argument_handles) {
+ absl::Span<LocalShapedBuffer* const> argument_handles) {
LocalClient* client = GetOrCreateLocalClient();
std::vector<const ShapedBuffer*> argument_buffers;
@@ -369,8 +369,7 @@ LocalOp LocalComputationBuilder::ConstantLiteral(const Literal& literal) {
}
LocalOp LocalComputationBuilder::Broadcast(
- const LocalOp& operand,
- tensorflow::gtl::ArraySlice<int64> broadcast_sizes) {
+ const LocalOp& operand, absl::Span<const int64> broadcast_sizes) {
return xla::Broadcast(operand.op(), broadcast_sizes);
}
@@ -380,14 +379,14 @@ LocalOp LocalComputationBuilder::Pad(const LocalOp& operand,
return xla::Pad(operand.op(), padding_value.op(), padding_config);
}
-LocalOp LocalComputationBuilder::Reshape(
- const LocalOp& operand, tensorflow::gtl::ArraySlice<int64> dimensions,
- tensorflow::gtl::ArraySlice<int64> new_sizes) {
+LocalOp LocalComputationBuilder::Reshape(const LocalOp& operand,
+ absl::Span<const int64> dimensions,
+ absl::Span<const int64> new_sizes) {
return xla::Reshape(operand.op(), dimensions, new_sizes);
}
-LocalOp LocalComputationBuilder::Collapse(
- const LocalOp& operand, tensorflow::gtl::ArraySlice<int64> dimensions) {
+LocalOp LocalComputationBuilder::Collapse(const LocalOp& operand,
+ absl::Span<const int64> dimensions) {
return xla::Collapse(operand.op(), dimensions);
}
@@ -395,10 +394,10 @@ LocalOp LocalComputationBuilder::CrossReplicaSum(const LocalOp& operand) {
return xla::CrossReplicaSum(operand.op());
}
-LocalOp LocalComputationBuilder::Slice(
- const LocalOp& operand, tensorflow::gtl::ArraySlice<int64> start_indices,
- tensorflow::gtl::ArraySlice<int64> limit_indices,
- tensorflow::gtl::ArraySlice<int64> strides) {
+LocalOp LocalComputationBuilder::Slice(const LocalOp& operand,
+ absl::Span<const int64> start_indices,
+ absl::Span<const int64> limit_indices,
+ absl::Span<const int64> strides) {
return xla::Slice(operand.op(), start_indices, limit_indices, strides);
}
@@ -411,7 +410,7 @@ LocalOp LocalComputationBuilder::SliceInDim(const LocalOp& operand,
LocalOp LocalComputationBuilder::DynamicSlice(
const LocalOp& operand, const LocalOp& start_indices,
- tensorflow::gtl::ArraySlice<int64> slice_sizes) {
+ absl::Span<const int64> slice_sizes) {
return xla::DynamicSlice(operand.op(), start_indices.op(), slice_sizes);
}
@@ -421,8 +420,8 @@ LocalOp LocalComputationBuilder::DynamicUpdateSlice(
return xla::DynamicUpdateSlice(operand.op(), update.op(), start_indices.op());
}
-LocalOp LocalComputationBuilder::ConcatInDim(
- tensorflow::gtl::ArraySlice<LocalOp> operands, int64 dimension) {
+LocalOp LocalComputationBuilder::ConcatInDim(absl::Span<const LocalOp> operands,
+ int64 dimension) {
std::vector<XlaOp> xla_ops;
xla_ops.reserve(operands.size());
for (const auto& op : operands) {
@@ -433,18 +432,16 @@ LocalOp LocalComputationBuilder::ConcatInDim(
LocalOp LocalComputationBuilder::SelectAndScatterWithGeneralPadding(
const LocalOp& operand, const LocalComputation& select,
- tensorflow::gtl::ArraySlice<int64> window_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
- const LocalOp& source, const LocalOp& init_value,
- const LocalComputation& scatter) {
+ absl::Span<const int64> window_dimensions,
+ absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding, const LocalOp& source,
+ const LocalOp& init_value, const LocalComputation& scatter) {
return xla::SelectAndScatterWithGeneralPadding(
operand.op(), select.computation(), window_dimensions, window_strides,
padding, source.op(), init_value.op(), scatter.computation());
}
-LocalOp LocalComputationBuilder::Tuple(
- tensorflow::gtl::ArraySlice<LocalOp> elements) {
+LocalOp LocalComputationBuilder::Tuple(absl::Span<const LocalOp> elements) {
std::vector<XlaOp> xla_ops;
xla_ops.reserve(elements.size());
for (const auto& op : elements) {
@@ -471,10 +468,9 @@ LocalOp LocalComputationBuilder::DotGeneral(
LocalOp LocalComputationBuilder::ConvGeneralDilated(
const LocalOp& lhs, const LocalOp& rhs,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding,
- tensorflow::gtl::ArraySlice<int64> lhs_dilation,
- tensorflow::gtl::ArraySlice<int64> rhs_dilation,
+ absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding,
+ absl::Span<const int64> lhs_dilation, absl::Span<const int64> rhs_dilation,
const ConvolutionDimensionNumbers& dimension_numbers) {
return xla::ConvGeneralDilated(lhs.op(), rhs.op(), window_strides, padding,
lhs_dilation, rhs_dilation, dimension_numbers);
@@ -490,9 +486,8 @@ LocalOp LocalComputationBuilder::BitcastConvertType(
return xla::BitcastConvertType(operand.op(), new_element_type);
}
-LocalOp LocalComputationBuilder::Call(
- const LocalComputation& local_computation,
- tensorflow::gtl::ArraySlice<LocalOp> operands) {
+LocalOp LocalComputationBuilder::Call(const LocalComputation& local_computation,
+ absl::Span<const LocalOp> operands) {
std::vector<XlaOp> xla_ops;
xla_ops.reserve(operands.size());
for (const auto& op : operands) {
@@ -502,19 +497,18 @@ LocalOp LocalComputationBuilder::Call(
}
LocalOp LocalComputationBuilder::Transpose(
- const LocalOp& operand, tensorflow::gtl::ArraySlice<int64> permutation) {
+ const LocalOp& operand, absl::Span<const int64> permutation) {
return xla::Transpose(operand.op(), permutation);
}
-LocalOp LocalComputationBuilder::Rev(
- const LocalOp& operand, tensorflow::gtl::ArraySlice<int64> dimensions) {
+LocalOp LocalComputationBuilder::Rev(const LocalOp& operand,
+ absl::Span<const int64> dimensions) {
return xla::Rev(operand.op(), dimensions);
}
-LocalOp LocalComputationBuilder::Map(
- tensorflow::gtl::ArraySlice<LocalOp> operands,
- const LocalComputation& local_computation,
- tensorflow::gtl::ArraySlice<int64> dimensions) {
+LocalOp LocalComputationBuilder::Map(absl::Span<const LocalOp> operands,
+ const LocalComputation& local_computation,
+ absl::Span<const int64> dimensions) {
std::vector<XlaOp> xla_ops;
xla_ops.reserve(operands.size());
for (const auto& op : operands) {
@@ -528,7 +522,7 @@ LocalOp LocalComputationBuilder::Map(
LocalOp LocalComputationBuilder::Reduce(
const LocalOp& operand, const LocalOp& init_value,
const LocalComputation& local_computation,
- tensorflow::gtl::ArraySlice<int64> dimensions_to_reduce) {
+ absl::Span<const int64> dimensions_to_reduce) {
return xla::Reduce(operand.op(), init_value.op(),
local_computation.computation(), dimensions_to_reduce);
}
@@ -536,9 +530,9 @@ LocalOp LocalComputationBuilder::Reduce(
LocalOp LocalComputationBuilder::ReduceWindowWithGeneralPadding(
const LocalOp& operand, const LocalOp& init_value,
const LocalComputation& local_computation,
- tensorflow::gtl::ArraySlice<int64> window_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding) {
+ absl::Span<const int64> window_dimensions,
+ absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64>> padding) {
return xla::ReduceWindowWithGeneralPadding(
operand.op(), init_value.op(), local_computation.computation(),
window_dimensions, window_strides, padding);
@@ -599,10 +593,10 @@ StatusOr<LocalComputation*> LocalComputationBuilder::BuildConstantSubGraph(
#define _FORWARD_UNOP(method_name) \
_FORWARD(method_name, LocalOp, (const LocalOp& operand), (operand.op()))
-#define _FORWARD_BINOP(method_name) \
- _FORWARD(method_name, LocalOp, \
- (const LocalOp& lhs, const LocalOp& rhs, \
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions), \
+#define _FORWARD_BINOP(method_name) \
+ _FORWARD(method_name, LocalOp, \
+ (const LocalOp& lhs, const LocalOp& rhs, \
+ absl::Span<const int64> broadcast_dimensions), \
(lhs.op(), rhs.op(), broadcast_dimensions))
#define _FORWARD_TRIOP(method_name) \
diff --git a/tensorflow/compiler/xla/python/local_computation_builder.h b/tensorflow/compiler/xla/python/local_computation_builder.h
index d9543b958d..df135935c3 100644
--- a/tensorflow/compiler/xla/python/local_computation_builder.h
+++ b/tensorflow/compiler/xla/python/local_computation_builder.h
@@ -122,7 +122,7 @@ class CompiledLocalComputation {
const std::vector<absl::optional<Shape> >& shapes_with_layout);
LocalShapedBuffer* ExecuteWithShapedBuffers(
- tensorflow::gtl::ArraySlice<LocalShapedBuffer*> argument_handles);
+ absl::Span<LocalShapedBuffer* const> argument_handles);
private:
std::unique_ptr<LocalExecutable> executable_;
@@ -199,46 +199,41 @@ class LocalComputationBuilder {
LocalOp ConstantLiteral(const Literal& literal);
LocalOp Broadcast(const LocalOp& operand,
- tensorflow::gtl::ArraySlice<int64> broadcast_sizes);
+ absl::Span<const int64> broadcast_sizes);
LocalOp Pad(const LocalOp& operand, const LocalOp& padding_value,
const PaddingConfig& padding_config);
- LocalOp Reshape(const LocalOp& operand,
- tensorflow::gtl::ArraySlice<int64> dimensions,
- tensorflow::gtl::ArraySlice<int64> new_sizes);
+ LocalOp Reshape(const LocalOp& operand, absl::Span<const int64> dimensions,
+ absl::Span<const int64> new_sizes);
- LocalOp Collapse(const LocalOp& operand,
- tensorflow::gtl::ArraySlice<int64> dimensions);
+ LocalOp Collapse(const LocalOp& operand, absl::Span<const int64> dimensions);
LocalOp CrossReplicaSum(const LocalOp& operand);
- LocalOp Slice(const LocalOp& operand,
- tensorflow::gtl::ArraySlice<int64> start_indices,
- tensorflow::gtl::ArraySlice<int64> limit_indices,
- tensorflow::gtl::ArraySlice<int64> strides);
+ LocalOp Slice(const LocalOp& operand, absl::Span<const int64> start_indices,
+ absl::Span<const int64> limit_indices,
+ absl::Span<const int64> strides);
LocalOp SliceInDim(const LocalOp& operand, int64 start_index,
int64 limit_index, int64 stride, int64 dimno);
LocalOp DynamicSlice(const LocalOp& operand, const LocalOp& start_indices,
- tensorflow::gtl::ArraySlice<int64> slice_sizes);
+ absl::Span<const int64> slice_sizes);
LocalOp DynamicUpdateSlice(const LocalOp& operand, const LocalOp& update,
const LocalOp& start_indices);
- LocalOp ConcatInDim(tensorflow::gtl::ArraySlice<LocalOp> operands,
- int64 dimension);
+ LocalOp ConcatInDim(absl::Span<const LocalOp> operands, int64 dimension);
LocalOp SelectAndScatterWithGeneralPadding(
const LocalOp& operand, const LocalComputation& select,
- tensorflow::gtl::ArraySlice<int64> window_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64> > padding,
- const LocalOp& source, const LocalOp& init_value,
- const LocalComputation& scatter);
+ absl::Span<const int64> window_dimensions,
+ absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64> > padding, const LocalOp& source,
+ const LocalOp& init_value, const LocalComputation& scatter);
- LocalOp Tuple(tensorflow::gtl::ArraySlice<LocalOp> elements);
+ LocalOp Tuple(absl::Span<const LocalOp> elements);
LocalOp GetTupleElement(const LocalOp& tuple_data, int64 index);
@@ -249,10 +244,10 @@ class LocalComputationBuilder {
LocalOp ConvGeneralDilated(
const LocalOp& lhs, const LocalOp& rhs,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64> > padding,
- tensorflow::gtl::ArraySlice<int64> lhs_dilation,
- tensorflow::gtl::ArraySlice<int64> rhs_dilation,
+ absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64> > padding,
+ absl::Span<const int64> lhs_dilation,
+ absl::Span<const int64> rhs_dilation,
const ConvolutionDimensionNumbers& dimension_numbers);
LocalOp ConvertElementType(const LocalOp& operand,
@@ -262,28 +257,27 @@ class LocalComputationBuilder {
PrimitiveType new_element_type);
LocalOp Call(const LocalComputation& local_computation,
- tensorflow::gtl::ArraySlice<LocalOp> operands);
+ absl::Span<const LocalOp> operands);
LocalOp Transpose(const LocalOp& operand,
- tensorflow::gtl::ArraySlice<int64> permutation);
+ absl::Span<const int64> permutation);
- LocalOp Rev(const LocalOp& operand,
- tensorflow::gtl::ArraySlice<int64> dimensions);
+ LocalOp Rev(const LocalOp& operand, absl::Span<const int64> dimensions);
- LocalOp Map(tensorflow::gtl::ArraySlice<LocalOp> operands,
+ LocalOp Map(absl::Span<const LocalOp> operands,
const LocalComputation& local_computation,
- tensorflow::gtl::ArraySlice<int64> dimensions);
+ absl::Span<const int64> dimensions);
LocalOp Reduce(const LocalOp& operand, const LocalOp& init_value,
const LocalComputation& local_computation,
- tensorflow::gtl::ArraySlice<int64> dimensions_to_reduce);
+ absl::Span<const int64> dimensions_to_reduce);
LocalOp ReduceWindowWithGeneralPadding(
const LocalOp& operand, const LocalOp& init_value,
const LocalComputation& local_computation,
- tensorflow::gtl::ArraySlice<int64> window_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_strides,
- tensorflow::gtl::ArraySlice<std::pair<int64, int64> > padding);
+ absl::Span<const int64> window_dimensions,
+ absl::Span<const int64> window_strides,
+ absl::Span<const std::pair<int64, int64> > padding);
LocalOp RngNormal(const LocalOp& mu, const LocalOp& sigma,
const Shape& shape);
@@ -316,7 +310,7 @@ class LocalComputationBuilder {
#define _FORWARD_BINOP(method_name) \
_FORWARD(method_name, LocalOp, \
(const LocalOp& lhs, const LocalOp& rhs, \
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions))
+ absl::Span<const int64> broadcast_dimensions))
#define _FORWARD_TRIOP(method_name) \
_FORWARD(method_name, LocalOp, \
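
The const placement in signatures like ExecuteWithShapedBuffers above follows a simple rule: ArraySlice<T> becomes absl::Span<const T>, so a read-only slice of pointers such as ArraySlice<LocalShapedBuffer*> becomes Span<LocalShapedBuffer* const>, where the pointers cannot be reseated through the view but the pointed-to objects stay mutable. A minimal sketch of that distinction, using a hypothetical Buffer struct and BumpAll function rather than the XLA types:

#include <vector>
#include "absl/types/span.h"

struct Buffer { int value = 0; };  // hypothetical stand-in type

// ArraySlice<Buffer*> maps to Span<Buffer* const>: the view cannot reseat the
// pointers it holds, but the Buffers they point to remain mutable.
void BumpAll(absl::Span<Buffer* const> buffers) {
  for (Buffer* b : buffers) b->value += 1;  // OK: pointee is mutable.
  // buffers[0] = nullptr;                  // Would not compile: element is Buffer* const.
}

int main() {
  Buffer a, b;
  std::vector<Buffer*> ptrs = {&a, &b};
  BumpAll(ptrs);  // std::vector<Buffer*> converts implicitly to the span.
  return 0;
}
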
diff --git a/tensorflow/compiler/xla/python/local_computation_builder.i b/tensorflow/compiler/xla/python/local_computation_builder.i
index f6169ebf19..e6034296d7 100644
--- a/tensorflow/compiler/xla/python/local_computation_builder.i
+++ b/tensorflow/compiler/xla/python/local_computation_builder.i
@@ -22,15 +22,15 @@ limitations under the License.
//
// C++ Python
// -------------------------------------+---------------------------------------
-// ArraySlice<int64> <- sequence of int
-// ArraySlice<LocalOp> <- sequence of LocalOp
+// Span<int64> <- sequence of int
+// Span<LocalOp> <- sequence of LocalOp
// Literal <-> (nested tuple of) numpy ndarray
// std::vector<Literal> <- sequence of (nested tuple of) ndarray
// Shape -> pair holding (dtype, dimensions)
// <- object duck-typed as xla_client.Shape
// std::vector<Shape> <- sequence of xla_client.Shape objects
// PrimitiveType <- int
-// ArraySlice<pair<int64, int64>> <- sequence of int pairs
+// Span<pair<int64, int64>> <- sequence of int pairs
// PaddingConfig proto <- corresponding Python proto
// ConvolutionDimensionNumbers proto <- corresponding Python proto
// DotDimensionNumbers proto <- corresponding Python proto
@@ -267,9 +267,9 @@ tensorflow::ImportNumpy();
$result = Py_None;
}
-// ArraySlice<int64>
+// Span<int64>
-%typemap(in) tensorflow::gtl::ArraySlice<int64>
+%typemap(in) absl::Span<const int64>
(std::vector<int64> temps) {
if (!PySequence_Check($input)) {
PyErr_SetString(PyExc_TypeError, "Argument is not a sequence");
@@ -299,9 +299,9 @@ tensorflow::ImportNumpy();
$1 = temps;
}
-// ArraySlice<LocalOp>
+// Span<LocalOp>
-%typemap(in) tensorflow::gtl::ArraySlice<xla::swig::LocalOp>(
+%typemap(in) absl::Span<const xla::swig::LocalOp>(
std::vector<LocalOp> temps) {
if (!PySequence_Check($input)) {
PyErr_SetString(PyExc_TypeError, "Argument is not a sequence");
@@ -323,7 +323,7 @@ tensorflow::ImportNumpy();
// LocalShapedBuffer*
-%typemap(in) tensorflow::gtl::ArraySlice<xla::swig::LocalShapedBuffer*>
+%typemap(in) absl::Span<xla::swig::LocalShapedBuffer* const>
(std::vector<LocalShapedBuffer*> temps) {
if (!PySequence_Check($input)) {
PyErr_SetString(PyExc_TypeError, "Argument is not a sequence");
@@ -496,9 +496,9 @@ tensorflow::ImportNumpy();
$1 = static_cast<PrimitiveType>(value);
}
-// ArraySlice<pair<int64, int64>>
+// Span<pair<int64, int64>>
-%typemap(in) tensorflow::gtl::ArraySlice<std::pair<int64, int64> >
+%typemap(in) absl::Span<const std::pair<int64, int64> >
(std::vector<std::pair<int64, int64> > temps) {
if (!PySequence_Check($input)) {
PyErr_SetString(PyExc_TypeError, "Argument is not a sequence");
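
The typemaps above materialize each incoming Python sequence into a local std::vector (the `temps` variable) and then hand the wrapped method an absl::Span over it; like ArraySlice, a Span is a non-owning view, so the vector has to stay alive for the duration of the call. A simplified sketch of the same pattern outside SWIG, with hypothetical Sum and SumFromCallerOwnedStorage functions and standard int64_t standing in for the int64 typedef:

#include <cstdint>
#include <vector>
#include "absl/types/span.h"

// Hypothetical wrapped function: consumes a read-only view of integer values.
int64_t Sum(absl::Span<const int64_t> xs) {
  int64_t total = 0;
  for (int64_t x : xs) total += x;
  return total;
}

int64_t SumFromCallerOwnedStorage() {
  // Plays the role of the typemap's `temps`: it owns the converted data.
  std::vector<int64_t> temps = {1, 2, 3};
  absl::Span<const int64_t> view(temps);  // non-owning view into temps
  return Sum(view);                       // temps outlives the call, so the view is valid
}
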
diff --git a/tensorflow/compiler/xla/reference_util.cc b/tensorflow/compiler/xla/reference_util.cc
index 3de7ee2bc8..a4854f593f 100644
--- a/tensorflow/compiler/xla/reference_util.cc
+++ b/tensorflow/compiler/xla/reference_util.cc
@@ -108,17 +108,15 @@ ReferenceUtil::ConvArray3DGeneralDimensionsDilated(
// array by adding a fourth dummy dimension of size 1 without stride, padding
// and dilation.
Array4D<float> a4dlhs(lhs.n1(), lhs.n2(), lhs.n3(), 1);
- a4dlhs.Each(
- [&](tensorflow::gtl::ArraySlice<int64> indices, float* value_ptr) {
- CHECK_EQ(indices[3], 0);
- *value_ptr = lhs.operator()(indices[0], indices[1], indices[2]);
- });
+ a4dlhs.Each([&](absl::Span<const int64> indices, float* value_ptr) {
+ CHECK_EQ(indices[3], 0);
+ *value_ptr = lhs.operator()(indices[0], indices[1], indices[2]);
+ });
Array4D<float> a4drhs(rhs.n1(), rhs.n2(), rhs.n3(), 1);
- a4drhs.Each(
- [&](tensorflow::gtl::ArraySlice<int64> indices, float* value_ptr) {
- CHECK_EQ(indices[3], 0);
- *value_ptr = rhs.operator()(indices[0], indices[1], indices[2]);
- });
+ a4drhs.Each([&](absl::Span<const int64> indices, float* value_ptr) {
+ CHECK_EQ(indices[3], 0);
+ *value_ptr = rhs.operator()(indices[0], indices[1], indices[2]);
+ });
// Add a second dummy spatial dimension.
ConvolutionDimensionNumbers dnums2d = dnums;
dnums2d.add_input_spatial_dimensions(3);
@@ -130,11 +128,10 @@ ReferenceUtil::ConvArray3DGeneralDimensionsDilated(
auto convr3 = absl::make_unique<Array3D<float>>(
convr4->planes(), convr4->depth(), convr4->height());
- convr4->Each(
- [&](tensorflow::gtl::ArraySlice<int64> indices, float* value_ptr) {
- CHECK_EQ(indices[3], 0);
- convr3->operator()(indices[0], indices[1], indices[2]) = *value_ptr;
- });
+ convr4->Each([&](absl::Span<const int64> indices, float* value_ptr) {
+ CHECK_EQ(indices[3], 0);
+ convr3->operator()(indices[0], indices[1], indices[2]) = *value_ptr;
+ });
return convr3;
}
@@ -189,11 +186,11 @@ ReferenceUtil::SeparableConvArray4D(const Array4D<float>& input,
/* static */ std::unique_ptr<std::vector<float>>
ReferenceUtil::ReduceWindow1DGeneric(
- const tensorflow::gtl::ArraySlice<float>& operand, float init,
+ const absl::Span<const float>& operand, float init,
const std::function<float(float, float)>& reduce_func,
- const tensorflow::gtl::ArraySlice<int64>& window,
- const tensorflow::gtl::ArraySlice<int64>& stride,
- const tensorflow::gtl::ArraySlice<std::pair<int64, int64>>& padding) {
+ const absl::Span<const int64>& window,
+ const absl::Span<const int64>& stride,
+ const absl::Span<const std::pair<int64, int64>>& padding) {
std::vector<int64> dim_lengths{static_cast<int64>(operand.size())};
std::vector<int64> window_counts(window.size(), 0);
std::vector<int64> pad_low(window.size(), 0);
@@ -221,10 +218,11 @@ ReferenceUtil::ReduceWindow1DGeneric(
}
/* static */ std::unique_ptr<std::vector<float>>
-ReferenceUtil::ReduceWindow1DAdd(
- const tensorflow::gtl::ArraySlice<float>& operand, float init,
- const tensorflow::gtl::ArraySlice<int64>& window,
- const tensorflow::gtl::ArraySlice<int64>& stride, Padding padding) {
+ReferenceUtil::ReduceWindow1DAdd(const absl::Span<const float>& operand,
+ float init,
+ const absl::Span<const int64>& window,
+ const absl::Span<const int64>& stride,
+ Padding padding) {
const auto add_reduce = [](float arg1, float arg2) { return arg1 + arg2; };
std::vector<int64> dim_lengths{static_cast<int64>(operand.size())};
return ReduceWindow1DGeneric(
@@ -236,9 +234,9 @@ ReferenceUtil::ReduceWindow1DAdd(
ReferenceUtil::ReduceWindow2DGeneric(
const Array2D<float>& operand, float init,
const std::function<float(float, float)>& reduce_func,
- const tensorflow::gtl::ArraySlice<int64>& window,
- const tensorflow::gtl::ArraySlice<int64>& stride,
- const tensorflow::gtl::ArraySlice<std::pair<int64, int64>>& padding) {
+ const absl::Span<const int64>& window,
+ const absl::Span<const int64>& stride,
+ const absl::Span<const std::pair<int64, int64>>& padding) {
std::vector<int64> dim_lengths{operand.height(), operand.width()};
std::vector<int64> window_counts(window.size(), 0);
@@ -276,8 +274,8 @@ ReferenceUtil::ReduceWindow2DGeneric(
/* static */ std::unique_ptr<Array2D<float>> ReferenceUtil::ReduceWindow2DAdd(
const Array2D<float>& operand, float init,
- const tensorflow::gtl::ArraySlice<int64>& window,
- const tensorflow::gtl::ArraySlice<int64>& stride, Padding padding) {
+ const absl::Span<const int64>& window,
+ const absl::Span<const int64>& stride, Padding padding) {
const auto add_reduce = [](float arg1, float arg2) { return arg1 + arg2; };
std::vector<int64> dim_lengths{operand.height(), operand.width()};
return ReduceWindow2DGeneric(
@@ -287,8 +285,8 @@ ReferenceUtil::ReduceWindow2DGeneric(
/* static */ std::unique_ptr<Array3D<float>> ReferenceUtil::ReduceWindow3DAdd(
const Array3D<float>& operand, float init,
- const tensorflow::gtl::ArraySlice<int64>& window,
- const tensorflow::gtl::ArraySlice<int64>& stride, Padding padding) {
+ const absl::Span<const int64>& window,
+ const absl::Span<const int64>& stride, Padding padding) {
std::vector<int64> dim_lengths{operand.n1(), operand.n2(), operand.n3()};
auto padding_both = xla::MakePadding(dim_lengths, window, stride, padding);
@@ -334,8 +332,8 @@ ReferenceUtil::ReduceWindow2DGeneric(
ReferenceUtil::ReduceWindow4DGeneric(
const Array4D<float>& operand, float init,
const std::function<float(float, float)>& reduce_func,
- const tensorflow::gtl::ArraySlice<int64>& window,
- const tensorflow::gtl::ArraySlice<int64>& stride, Padding padding) {
+ const absl::Span<const int64>& window,
+ const absl::Span<const int64>& stride, Padding padding) {
std::vector<int64> dim_lengths{operand.n1(), operand.n2(), operand.n3(),
operand.n4()};
return ReduceWindow4DGeneric(
@@ -347,9 +345,9 @@ ReferenceUtil::ReduceWindow4DGeneric(
ReferenceUtil::ReduceWindow4DGeneric(
const Array4D<float>& operand, float init,
const std::function<float(float, float)>& reduce_func,
- const tensorflow::gtl::ArraySlice<int64>& window,
- const tensorflow::gtl::ArraySlice<int64>& stride,
- const tensorflow::gtl::ArraySlice<std::pair<int64, int64>>& padding) {
+ const absl::Span<const int64>& window,
+ const absl::Span<const int64>& stride,
+ const absl::Span<const std::pair<int64, int64>>& padding) {
std::vector<int64> dim_lengths{operand.n1(), operand.n2(), operand.n3(),
operand.n4()};
@@ -402,8 +400,8 @@ ReferenceUtil::ReduceWindow4DGeneric(
/* static */ std::unique_ptr<Array4D<float>> ReferenceUtil::ReduceWindow4DAdd(
const Array4D<float>& operand, float init,
- const tensorflow::gtl::ArraySlice<int64>& window,
- const tensorflow::gtl::ArraySlice<int64>& stride, Padding padding) {
+ const absl::Span<const int64>& window,
+ const absl::Span<const int64>& stride, Padding padding) {
const auto add_reduce = [](float arg1, float arg2) { return arg1 + arg2; };
return ReduceWindow4DGeneric(operand, init, add_reduce, window, stride,
padding);
@@ -424,10 +422,12 @@ ReferenceUtil::ReduceWindow4DGeneric(
}
/* static */ std::unique_ptr<Array4D<float>>
-ReferenceUtil::SelectAndScatter4DGePlus(
- const Array4D<float>& operand, const Array4D<float>& source, float init,
- const tensorflow::gtl::ArraySlice<int64>& window,
- const tensorflow::gtl::ArraySlice<int64>& stride, bool same_padding) {
+ReferenceUtil::SelectAndScatter4DGePlus(const Array4D<float>& operand,
+ const Array4D<float>& source,
+ float init,
+ const absl::Span<const int64>& window,
+ const absl::Span<const int64>& stride,
+ bool same_padding) {
Padding padding = same_padding ? Padding::kSame : Padding::kValid;
auto result = absl::make_unique<Array4D<float>>(operand.n1(), operand.n2(),
operand.n3(), operand.n4());
@@ -591,7 +591,7 @@ ReferenceUtil::ConvArray4DGeneralDimensionsDilated(
result_literal->shape().dimensions(2),
result_literal->shape().dimensions(3));
- result->Each([&](tensorflow::gtl::ArraySlice<int64> indices, float* value) {
+ result->Each([&](absl::Span<const int64> indices, float* value) {
*value = result_literal->Get<float>(indices);
});
@@ -633,8 +633,7 @@ ReferenceUtil::ReduceToRowArray2D(
}
/*static*/ std::vector<float> ReferenceUtil::Reduce4DTo1D(
- const Array4D<float>& array, float init,
- tensorflow::gtl::ArraySlice<int64> dims,
+ const Array4D<float>& array, float init, absl::Span<const int64> dims,
const std::function<float(float, float)>& reduce_function) {
std::vector<float> result;
CHECK_EQ(dims.size(), 3);
@@ -707,8 +706,7 @@ ReferenceUtil::ReduceToRowArray2D(
}
/* static */ std::unique_ptr<Array2D<float>> ReferenceUtil::Reduce3DTo2D(
- const Array3D<float>& array, float init,
- tensorflow::gtl::ArraySlice<int64> dims,
+ const Array3D<float>& array, float init, absl::Span<const int64> dims,
const std::function<float(float, float)>& reduce_function) {
CHECK_EQ(dims.size(), 1);
int64 rows = dims[0] == 0 ? array.n2() : array.n1();
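
The ReferenceUtil changes above keep handing per-element indices to callbacks as absl::Span<const int64>. A toy analogue of that Each-style visitor, not the xla::Array4D API, with hypothetical Each2D and ZeroDiagonal helpers and int64_t in place of int64:

#include <cstdint>
#include <functional>
#include <vector>
#include "absl/types/span.h"

// Hypothetical visitor: calls `fn` once per element of a row-major rows x cols
// buffer, passing the element's (row, col) index as a span.
void Each2D(std::vector<float>& data, int64_t rows, int64_t cols,
            const std::function<void(absl::Span<const int64_t>, float*)>& fn) {
  for (int64_t r = 0; r < rows; ++r) {
    for (int64_t c = 0; c < cols; ++c) {
      const int64_t indices[] = {r, c};
      fn(indices, &data[r * cols + c]);  // the C array converts to the span
    }
  }
}

// Usage: zero the diagonal of a 3x3 buffer.
void ZeroDiagonal(std::vector<float>& data) {
  Each2D(data, 3, 3, [](absl::Span<const int64_t> idx, float* v) {
    if (idx[0] == idx[1]) *v = 0.0f;
  });
}
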
diff --git a/tensorflow/compiler/xla/reference_util.h b/tensorflow/compiler/xla/reference_util.h
index 88f853a359..b1d530c59e 100644
--- a/tensorflow/compiler/xla/reference_util.h
+++ b/tensorflow/compiler/xla/reference_util.h
@@ -144,8 +144,7 @@ class ReferenceUtil {
// Returns the result of reducing the 4D array to a vector, reducing away
// the dimensions specified in dims.
static std::vector<float> Reduce4DTo1D(
- const Array4D<float>& array, float init,
- tensorflow::gtl::ArraySlice<int64> dims,
+ const Array4D<float>& array, float init, absl::Span<const int64> dims,
const std::function<float(float, float)>& reduce_function);
// Broadcast 1D dimension to 4D, from the dimension `broadcast_from_dim`.
@@ -156,8 +155,7 @@ class ReferenceUtil {
// Returns the result of reducing the 3D array to a 2D array, reducing away
// the dimensions specified in dims.
static std::unique_ptr<Array2D<float>> Reduce3DTo2D(
- const Array3D<float>& array, float init,
- tensorflow::gtl::ArraySlice<int64> dims,
+ const Array3D<float>& array, float init, absl::Span<const int64> dims,
const std::function<float(float, float)>& reduce_function);
// Applies map_function to each element in the input (2D array) and returns
@@ -179,47 +177,47 @@ class ReferenceUtil {
// Windowed reductions with Add as the function to apply.
static std::unique_ptr<std::vector<float>> ReduceWindow1DAdd(
- const tensorflow::gtl::ArraySlice<float>& operand, float init,
- const tensorflow::gtl::ArraySlice<int64>& window,
- const tensorflow::gtl::ArraySlice<int64>& stride, Padding padding);
+ const absl::Span<const float>& operand, float init,
+ const absl::Span<const int64>& window,
+ const absl::Span<const int64>& stride, Padding padding);
static std::unique_ptr<Array2D<float>> ReduceWindow2DAdd(
const Array2D<float>& operand, float init,
- const tensorflow::gtl::ArraySlice<int64>& window,
- const tensorflow::gtl::ArraySlice<int64>& stride, Padding padding);
+ const absl::Span<const int64>& window,
+ const absl::Span<const int64>& stride, Padding padding);
static std::unique_ptr<Array3D<float>> ReduceWindow3DAdd(
const Array3D<float>& operand, float init,
- const tensorflow::gtl::ArraySlice<int64>& window,
- const tensorflow::gtl::ArraySlice<int64>& stride, Padding padding);
+ const absl::Span<const int64>& window,
+ const absl::Span<const int64>& stride, Padding padding);
static std::unique_ptr<Array4D<float>> ReduceWindow4DAdd(
const Array4D<float>& operand, float init,
- const tensorflow::gtl::ArraySlice<int64>& window,
- const tensorflow::gtl::ArraySlice<int64>& stride, Padding padding);
+ const absl::Span<const int64>& window,
+ const absl::Span<const int64>& stride, Padding padding);
// Windowed reductions with a generic reduce function.
static std::unique_ptr<std::vector<float>> ReduceWindow1DGeneric(
- const tensorflow::gtl::ArraySlice<float>& operand, float init,
+ const absl::Span<const float>& operand, float init,
const std::function<float(float, float)>& reduce_func,
- const tensorflow::gtl::ArraySlice<int64>& window,
- const tensorflow::gtl::ArraySlice<int64>& stride,
- const tensorflow::gtl::ArraySlice<std::pair<int64, int64>>& padding);
+ const absl::Span<const int64>& window,
+ const absl::Span<const int64>& stride,
+ const absl::Span<const std::pair<int64, int64>>& padding);
static std::unique_ptr<Array2D<float>> ReduceWindow2DGeneric(
const Array2D<float>& operand, float init,
const std::function<float(float, float)>& reduce_func,
- const tensorflow::gtl::ArraySlice<int64>& window,
- const tensorflow::gtl::ArraySlice<int64>& stride,
- const tensorflow::gtl::ArraySlice<std::pair<int64, int64>>& padding);
+ const absl::Span<const int64>& window,
+ const absl::Span<const int64>& stride,
+ const absl::Span<const std::pair<int64, int64>>& padding);
static std::unique_ptr<Array4D<float>> ReduceWindow4DGeneric(
const Array4D<float>& operand, float init,
const std::function<float(float, float)>& reduce_func,
- const tensorflow::gtl::ArraySlice<int64>& window,
- const tensorflow::gtl::ArraySlice<int64>& stride, Padding padding);
+ const absl::Span<const int64>& window,
+ const absl::Span<const int64>& stride, Padding padding);
// With arbitrary padding.
static std::unique_ptr<Array4D<float>> ReduceWindow4DGeneric(
const Array4D<float>& operand, float init,
const std::function<float(float, float)>& reduce_func,
- const tensorflow::gtl::ArraySlice<int64>& window,
- const tensorflow::gtl::ArraySlice<int64>& stride,
- const tensorflow::gtl::ArraySlice<std::pair<int64, int64>>& padding);
+ const absl::Span<const int64>& window,
+ const absl::Span<const int64>& stride,
+ const absl::Span<const std::pair<int64, int64>>& padding);
// Batch normalize data.
static std::unique_ptr<Array4D<float>> BatchNorm4D(
@@ -232,8 +230,8 @@ class ReferenceUtil {
// TODO(b/74533103) Switch tests to evaluator and remove this implementation.
static std::unique_ptr<Array4D<float>> SelectAndScatter4DGePlus(
const Array4D<float>& operand, const Array4D<float>& source, float init,
- const tensorflow::gtl::ArraySlice<int64>& window,
- const tensorflow::gtl::ArraySlice<int64>& stride, bool same_padding);
+ const absl::Span<const int64>& window,
+ const absl::Span<const int64>& stride, bool same_padding);
// Concatenates the lhs and rhs arrays along the concatenate_dimension.
// E.g. if concatenate_dimension is 0, the "n1"/height dimension is
@@ -334,8 +332,8 @@ class ReferenceUtil {
// Slices with index clamping
template <typename T>
- static std::vector<T> ClampSlice1D(
- const tensorflow::gtl::ArraySlice<T>& input, int64 start, int64 size) {
+ static std::vector<T> ClampSlice1D(const absl::Span<const T>& input,
+ int64 start, int64 size) {
start = std::min<int64>(std::max<int64>(0, start), input.size() - size);
std::vector<T> result;
for (int64 i = 0; i < size; ++i) {
@@ -633,7 +631,7 @@ class ReferenceUtil {
Array4D<NativeT> result(output_bounds[0], output_bounds[1],
output_bounds[2], output_bounds[3]);
result.Each(
- [&](tensorflow::gtl::ArraySlice<int64> indices, NativeT* value) {
+ [&](absl::Span<const int64> indices, NativeT* value) {
for (int i = 0; i < 4; ++i) {
bool in_low_padding = indices[i] < pad_low[i];
bool in_high_padding = indices[i] >= output_bounds[i] - pad_high[i];
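
The ReferenceUtil headers above still take spans by const reference; the usual Abseil guidance, not something this commit changes, is to pass absl::Span by value, since it is a small view that converts implicitly from vectors, C arrays, and braced lists. A short sketch with a hypothetical Product function:

#include <cstdint>
#include <vector>
#include "absl/types/span.h"

// Hypothetical helper; takes the span by value, which is the common style.
int64_t Product(absl::Span<const int64_t> dims) {
  int64_t p = 1;
  for (int64_t d : dims) p *= d;
  return p;
}

void Callers() {
  std::vector<int64_t> v = {2, 3, 4};
  int64_t a[] = {5, 6};
  int64_t p1 = Product(v);          // from a std::vector
  int64_t p2 = Product(a);          // from a C array
  int64_t p3 = Product({7, 8, 9});  // from a braced initializer list
  (void)p1; (void)p2; (void)p3;
}
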
diff --git a/tensorflow/compiler/xla/service/algebraic_simplifier.cc b/tensorflow/compiler/xla/service/algebraic_simplifier.cc
index 196865f333..a7a0044308 100644
--- a/tensorflow/compiler/xla/service/algebraic_simplifier.cc
+++ b/tensorflow/compiler/xla/service/algebraic_simplifier.cc
@@ -449,8 +449,7 @@ Status AlgebraicSimplifierVisitor::HandleCopy(HloInstruction* copy) {
Status AlgebraicSimplifierVisitor::HandleConcatenate(
HloInstruction* concatenate) {
- tensorflow::gtl::ArraySlice<HloInstruction*> operands(
- concatenate->operands());
+ absl::Span<HloInstruction* const> operands(concatenate->operands());
if (operands.size() == 1) {
// Unary concatenates are useless.
ReplaceInstructionIfSameShape(concatenate, operands[0]);
@@ -588,7 +587,7 @@ Status AlgebraicSimplifierVisitor::HandleSubtract(HloInstruction* sub) {
namespace {
template <typename T>
Status InvertConstant(const HloInstruction& constant, Literal* result) {
- return result->Populate<T>([&](tensorflow::gtl::ArraySlice<int64> indices) {
+ return result->Populate<T>([&](absl::Span<const int64> indices) {
return T{1.0} / constant.literal().Get<T>(indices);
});
}
@@ -1249,8 +1248,7 @@ namespace {
//
// Precondition: input_dim_indices is sorted.
absl::optional<std::vector<int64>> ReshapeLeavesDimensionsUnmodified(
- const HloInstruction* hlo,
- tensorflow::gtl::ArraySlice<int64> input_dim_indices) {
+ const HloInstruction* hlo, absl::Span<const int64> input_dim_indices) {
CHECK_EQ(HloOpcode::kReshape, hlo->opcode());
CHECK(std::is_sorted(input_dim_indices.begin(), input_dim_indices.end()));
@@ -1853,7 +1851,7 @@ Status AlgebraicSimplifierVisitor::HandleReduce(HloInstruction* reduce) {
auto arg = reduce->mutable_operand(0);
auto init_value = reduce->mutable_operand(1);
- tensorflow::gtl::ArraySlice<int64> dimensions(reduce->dimensions());
+ absl::Span<const int64> dimensions(reduce->dimensions());
HloComputation* function = reduce->to_apply();
if (ShapeUtil::IsZeroElementArray(arg->shape()) ||
ShapeUtil::IsZeroElementArray(reduce->shape())) {
diff --git a/tensorflow/compiler/xla/service/algebraic_simplifier_test.cc b/tensorflow/compiler/xla/service/algebraic_simplifier_test.cc
index cbce98ef13..182c581ad8 100644
--- a/tensorflow/compiler/xla/service/algebraic_simplifier_test.cc
+++ b/tensorflow/compiler/xla/service/algebraic_simplifier_test.cc
@@ -2226,7 +2226,7 @@ TEST_F(AlgebraicSimplifierTest, ConvertConvToMatmul) {
auto out_dims = in_dims;
out_dims[in_channel_idx] = options.f_output_channels;
- auto make_shape = [](tensorflow::gtl::ArraySlice<int64> dims,
+ auto make_shape = [](absl::Span<const int64> dims,
bool minor_to_major_layout) {
if (minor_to_major_layout) {
return ShapeUtil::MakeShapeWithLayout(F32, dims, {0, 1, 2, 3});
@@ -2838,8 +2838,8 @@ TEST_P(PadReduceWindowEffectiveBroadcastTest, DoIt) {
// a and b are parallel bounds we can either turn into a B F S0 S1 or
// `B S0 S1 F` kind of pattern.
- auto decorate_spatials = [&param](tensorflow::gtl::ArraySlice<int64> spatials,
- int64 a, int64 b) {
+ auto decorate_spatials = [&param](absl::Span<const int64> spatials, int64 a,
+ int64 b) {
std::vector<int64> result;
if (param.prepend_a) {
result.push_back(a);
diff --git a/tensorflow/compiler/xla/service/backend.cc b/tensorflow/compiler/xla/service/backend.cc
index a6889cb171..5c180cbdd4 100644
--- a/tensorflow/compiler/xla/service/backend.cc
+++ b/tensorflow/compiler/xla/service/backend.cc
@@ -112,11 +112,11 @@ StatusOr<StreamPool::Ptr> Backend::BorrowStream(se::StreamExecutor* executor) {
return stream_pools_.at(executor).BorrowStream(executor);
}
-Backend::Backend(
- se::Platform* platform, Compiler* compiler,
- tensorflow::gtl::ArraySlice<se::StreamExecutor*> stream_executors,
- TransferManager* transfer_manager, ComputationPlacer* computation_placer,
- int intra_op_parallelism_threads)
+Backend::Backend(se::Platform* platform, Compiler* compiler,
+ absl::Span<se::StreamExecutor* const> stream_executors,
+ TransferManager* transfer_manager,
+ ComputationPlacer* computation_placer,
+ int intra_op_parallelism_threads)
: platform_(platform),
compiler_(compiler),
transfer_manager_(transfer_manager),
diff --git a/tensorflow/compiler/xla/service/backend.h b/tensorflow/compiler/xla/service/backend.h
index 4a6a78daf0..fdf8d9cab2 100644
--- a/tensorflow/compiler/xla/service/backend.h
+++ b/tensorflow/compiler/xla/service/backend.h
@@ -149,7 +149,7 @@ class Backend {
private:
struct EigenThreadPoolWrapper;
Backend(se::Platform* platform, Compiler* compiler,
- tensorflow::gtl::ArraySlice<se::StreamExecutor*> stream_executors,
+ absl::Span<se::StreamExecutor* const> stream_executors,
TransferManager* transfer_manager,
ComputationPlacer* computation_placer,
int intra_op_parallelism_threads);
diff --git a/tensorflow/compiler/xla/service/bfloat16_normalization.cc b/tensorflow/compiler/xla/service/bfloat16_normalization.cc
index a6f77db3b0..b5cf245af6 100644
--- a/tensorflow/compiler/xla/service/bfloat16_normalization.cc
+++ b/tensorflow/compiler/xla/service/bfloat16_normalization.cc
@@ -69,8 +69,7 @@ class BFloat16NormalizationVisitor : public DfsHloVisitorWithDefault {
// Inserts conversion HLOs to replace the called computations' BF16
// operands/outputs to F32.
Status ConvertCalledComputations(
- HloInstruction* hlo,
- tensorflow::gtl::ArraySlice<HloComputation*> bf16_called_comps);
+ HloInstruction* hlo, absl::Span<HloComputation* const> bf16_called_comps);
HloComputation* computation_;
const BFloat16Support* bfloat16_support_;
@@ -114,8 +113,7 @@ Status BFloat16NormalizationVisitor::InsertConvertBeforeOperand(
}
Status BFloat16NormalizationVisitor::ConvertCalledComputations(
- HloInstruction* hlo,
- tensorflow::gtl::ArraySlice<HloComputation*> bf16_called_comps) {
+ HloInstruction* hlo, absl::Span<HloComputation* const> bf16_called_comps) {
std::map<HloComputation*, HloComputation*> cloned_computations;
for (auto& comp : bf16_called_comps) {
auto cloned = comp->parent()->AddEmbeddedComputation(comp->Clone());
diff --git a/tensorflow/compiler/xla/service/bfloat16_propagation.cc b/tensorflow/compiler/xla/service/bfloat16_propagation.cc
index 2fb401c428..545a6ecfb1 100644
--- a/tensorflow/compiler/xla/service/bfloat16_propagation.cc
+++ b/tensorflow/compiler/xla/service/bfloat16_propagation.cc
@@ -407,7 +407,7 @@ void BFloat16Propagation::AdjustCalledComputationParameters(
HloInstruction* hlo) {
auto adjust_computation =
[this, hlo](HloComputation* computation,
- tensorflow::gtl::ArraySlice<HloInstruction*> operands) {
+ absl::Span<HloInstruction* const> operands) {
// Adjust parameters.
CHECK_EQ(operands.size(), computation->num_parameters());
for (int64 i = 0; i < operands.size(); ++i) {
diff --git a/tensorflow/compiler/xla/service/buffer_assignment_test.cc b/tensorflow/compiler/xla/service/buffer_assignment_test.cc
index e9751cc269..8bd1533972 100644
--- a/tensorflow/compiler/xla/service/buffer_assignment_test.cc
+++ b/tensorflow/compiler/xla/service/buffer_assignment_test.cc
@@ -118,7 +118,7 @@ class BufferAssignmentTest : public HloVerifiedTestBase {
std::unique_ptr<BufferAssignment> RunBufferAssignmentWithInstructionSequence(
HloModule* module,
- tensorflow::gtl::ArraySlice<const HloInstruction*> instruction_sequence,
+ absl::Span<const HloInstruction* const> instruction_sequence,
int64 alignment = 1) {
SequentialHloOrdering::HloModuleSequence module_sequence;
module_sequence[module->entry_computation()] =
diff --git a/tensorflow/compiler/xla/service/compile_only_service.cc b/tensorflow/compiler/xla/service/compile_only_service.cc
index 3079695e96..e5a6c28478 100644
--- a/tensorflow/compiler/xla/service/compile_only_service.cc
+++ b/tensorflow/compiler/xla/service/compile_only_service.cc
@@ -62,7 +62,7 @@ CompileOnlyService::CompileOnlyService(const ServiceOptions& options,
StatusOr<std::vector<std::unique_ptr<AotCompilationResult>>>
CompileOnlyService::CompileAheadOfTime(
- const tensorflow::gtl::ArraySlice<AotXlaComputationInstance> computations,
+ const absl::Span<const AotXlaComputationInstance> computations,
const AotCompilationOptions& options,
std::unique_ptr<AotCompilationMetadata>* metadata) {
std::vector<std::unique_ptr<HloModule>> hlo_modules;
diff --git a/tensorflow/compiler/xla/service/compile_only_service.h b/tensorflow/compiler/xla/service/compile_only_service.h
index 1ac950bdd6..61136a3e11 100644
--- a/tensorflow/compiler/xla/service/compile_only_service.h
+++ b/tensorflow/compiler/xla/service/compile_only_service.h
@@ -50,12 +50,12 @@ class CompileOnlyService : public Service {
// |CompileOnlyClient::CompileAheadOfTime| for additional details.
StatusOr<std::vector<std::unique_ptr<AotCompilationResult>>>
CompileAheadOfTime(
- const tensorflow::gtl::ArraySlice<AotXlaComputationInstance> computations,
+ const absl::Span<const AotXlaComputationInstance> computations,
const AotCompilationOptions& options);
StatusOr<std::vector<std::unique_ptr<AotCompilationResult>>>
CompileAheadOfTime(
- const tensorflow::gtl::ArraySlice<AotXlaComputationInstance> computations,
+ const absl::Span<const AotXlaComputationInstance> computations,
const AotCompilationOptions& options,
std::unique_ptr<AotCompilationMetadata>* metadata);
diff --git a/tensorflow/compiler/xla/service/copy_insertion.cc b/tensorflow/compiler/xla/service/copy_insertion.cc
index 1b7a7b36ea..b65dfef9c9 100644
--- a/tensorflow/compiler/xla/service/copy_insertion.cc
+++ b/tensorflow/compiler/xla/service/copy_insertion.cc
@@ -479,7 +479,7 @@ class CopyRemover {
// 'values' an entry is created in value_to_node which indicates the
// respective ValueNode representing that value.
void AddValueList(
- tensorflow::gtl::ArraySlice<const HloValue*> values,
+ absl::Span<const HloValue* const> values,
tensorflow::gtl::FlatMap<const HloValue*, ValueNode*>* value_to_node) {
ValueNode* tail = nullptr;
ValueNode* head = nullptr;
diff --git a/tensorflow/compiler/xla/service/cpu/buffer_info_util.cc b/tensorflow/compiler/xla/service/cpu/buffer_info_util.cc
index 408fe0f5bf..1942ea1a2a 100644
--- a/tensorflow/compiler/xla/service/cpu/buffer_info_util.cc
+++ b/tensorflow/compiler/xla/service/cpu/buffer_info_util.cc
@@ -40,7 +40,7 @@ std::vector<BufferInfo> CreateBufferInfosFromBufferAssignment(
}
std::vector<int32> CreateArgIndexTableFromBufferInfos(
- tensorflow::gtl::ArraySlice<BufferInfo> buffer_infos) {
+ absl::Span<const BufferInfo> buffer_infos) {
std::vector<int32> result;
for (int64 i = 0; i < buffer_infos.size(); i++) {
if (buffer_infos[i].is_entry_parameter()) {
diff --git a/tensorflow/compiler/xla/service/cpu/buffer_info_util.h b/tensorflow/compiler/xla/service/cpu/buffer_info_util.h
index 05de70c726..0c5a60f13f 100644
--- a/tensorflow/compiler/xla/service/cpu/buffer_info_util.h
+++ b/tensorflow/compiler/xla/service/cpu/buffer_info_util.h
@@ -34,7 +34,7 @@ CreateBufferInfosFromBufferAssignment(
// If this function returns V then entry parameter i has buffer allocation index
// V[i].
std::vector<int32> CreateArgIndexTableFromBufferInfos(
- tensorflow::gtl::ArraySlice<::tensorflow::cpu_function_runtime::BufferInfo>
+ absl::Span<const ::tensorflow::cpu_function_runtime::BufferInfo>
buffer_infos);
} // namespace cpu
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/cpu/cpu_executable.cc b/tensorflow/compiler/xla/service/cpu/cpu_executable.cc
index bf2efc4d14..9b00f2eaa5 100644
--- a/tensorflow/compiler/xla/service/cpu/cpu_executable.cc
+++ b/tensorflow/compiler/xla/service/cpu/cpu_executable.cc
@@ -77,7 +77,7 @@ StatusOr<std::pair<std::vector<se::DeviceMemoryBase>,
std::vector<OwningDeviceMemory>>>
CpuExecutable::CreateTempArray(
DeviceMemoryAllocator* memory_allocator, int device_ordinal,
- tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments) {
+ absl::Span<const ShapedBuffer* const> arguments) {
std::vector<se::DeviceMemoryBase> unowning_buffers(
assignment_->Allocations().size());
std::vector<OwningDeviceMemory> owning_buffers(
@@ -136,7 +136,7 @@ CpuExecutable::CreateTempArray(
Status CpuExecutable::ExecuteComputeFunction(
const ExecutableRunOptions* run_options,
- tensorflow::gtl::ArraySlice<se::DeviceMemoryBase> buffers,
+ absl::Span<const se::DeviceMemoryBase> buffers,
HloExecutionProfile* hlo_execution_profile) {
// The calling convention for JITed functions is:
//
@@ -207,7 +207,7 @@ Status CpuExecutable::ExecuteComputeFunction(
StatusOr<ScopedShapedBuffer> CpuExecutable::CreateResultShapedBuffer(
const ServiceExecutableRunOptions* run_options,
- tensorflow::gtl::MutableArraySlice<OwningDeviceMemory> buffers) {
+ absl::Span<OwningDeviceMemory> buffers) {
se::Stream* stream = run_options->stream();
ScopedShapedBuffer result_buffer(
/*on_host_shape=*/result_shape(),
@@ -245,7 +245,7 @@ StatusOr<ScopedShapedBuffer> CpuExecutable::CreateResultShapedBuffer(
StatusOr<ScopedShapedBuffer> CpuExecutable::ExecuteOnStream(
const ServiceExecutableRunOptions* run_options,
- tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments,
+ absl::Span<const ShapedBuffer* const> arguments,
HloExecutionProfile* hlo_execution_profile) {
TF_ASSIGN_OR_RETURN(
auto result,
@@ -256,7 +256,7 @@ StatusOr<ScopedShapedBuffer> CpuExecutable::ExecuteOnStream(
StatusOr<ScopedShapedBuffer> CpuExecutable::ExecuteAsyncOnStream(
const ServiceExecutableRunOptions* run_options,
- tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments) {
+ absl::Span<const ShapedBuffer* const> arguments) {
if (hlo_profiling_enabled()) {
return Unimplemented(
"Asynchronous execution on stream with hlo profiling is not yet "
@@ -267,7 +267,7 @@ StatusOr<ScopedShapedBuffer> CpuExecutable::ExecuteAsyncOnStream(
StatusOr<ScopedShapedBuffer> CpuExecutable::ExecuteAsyncOnStreamImpl(
const ServiceExecutableRunOptions* run_options,
- tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments,
+ absl::Span<const ShapedBuffer* const> arguments,
HloExecutionProfile* hlo_execution_profile) {
if (GetRootPointsToSet().IsAmbiguous()) {
return Unimplemented("Points-to set of root instruction is ambiguous");
@@ -299,7 +299,7 @@ StatusOr<ScopedShapedBuffer> CpuExecutable::ExecuteAsyncOnStreamImpl(
//
// We also need to change the types of some of the variables we capture:
// run_options needs to change from a pointer to a value type, and arguments
- // needs to change from an ArraySlice into a vector. We use a struct instead
+ // needs to change from a Span into a vector. We use a struct instead
// of a lambda to make this explicit.
struct AsyncRunTask {
CpuExecutable* executable;
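
The AsyncRunTask comment above explains why `arguments` has to change from a Span into a vector before the closure runs asynchronously: a Span, like the ArraySlice it replaces, does not own its elements, so a captured view can dangle once the caller's storage disappears. A hedged sketch of the copy-then-capture pattern, with a hypothetical ScheduleOnWorkerThread stand-in that simply runs the task inline:

#include <cstdint>
#include <functional>
#include <vector>
#include "absl/types/span.h"

// Hypothetical stand-in for whatever runs the closure later; runs it inline
// here so the sketch stays self-contained.
void ScheduleOnWorkerThread(std::function<void()> task) { task(); }

void RunLater(absl::Span<const int64_t> args) {
  // Copy the viewed elements into owning storage before capturing; capturing
  // `args` itself could dangle once the caller's buffer goes away.
  std::vector<int64_t> owned(args.begin(), args.end());
  ScheduleOnWorkerThread([owned]() {
    int64_t total = 0;
    for (int64_t x : owned) total += x;  // `owned` lives as long as the closure
    (void)total;
  });
}
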
diff --git a/tensorflow/compiler/xla/service/cpu/cpu_executable.h b/tensorflow/compiler/xla/service/cpu/cpu_executable.h
index 96e53de57e..236de8f14f 100644
--- a/tensorflow/compiler/xla/service/cpu/cpu_executable.h
+++ b/tensorflow/compiler/xla/service/cpu/cpu_executable.h
@@ -57,12 +57,12 @@ class CpuExecutable : public Executable {
StatusOr<ScopedShapedBuffer> ExecuteOnStream(
const ServiceExecutableRunOptions* run_options,
- tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments,
+ absl::Span<const ShapedBuffer* const> arguments,
HloExecutionProfile* hlo_execution_profile) override;
StatusOr<ScopedShapedBuffer> ExecuteAsyncOnStream(
const ServiceExecutableRunOptions* run_options,
- tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments) override;
+ absl::Span<const ShapedBuffer* const> arguments) override;
// This should be called after set_ir_module_string.
const string& ir_module_string() const { return ir_module_string_; }
@@ -92,7 +92,7 @@ class CpuExecutable : public Executable {
// exists) must out-live the task.
StatusOr<ScopedShapedBuffer> ExecuteAsyncOnStreamImpl(
const ServiceExecutableRunOptions* run_options,
- tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments,
+ absl::Span<const ShapedBuffer* const> arguments,
HloExecutionProfile* hlo_execution_profile);
// Creates an array suitable for passing as the "temps" argument to the JIT
@@ -112,21 +112,20 @@ class CpuExecutable : public Executable {
StatusOr<std::pair<std::vector<se::DeviceMemoryBase>,
std::vector<OwningDeviceMemory>>>
CreateTempArray(DeviceMemoryAllocator* memory_allocator, int device_ordinal,
- tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments);
+ absl::Span<const ShapedBuffer* const> arguments);
// Calls the generated function performing the computation with the given
// arguments using the supplied buffers.
- Status ExecuteComputeFunction(
- const ExecutableRunOptions* run_options,
- tensorflow::gtl::ArraySlice<se::DeviceMemoryBase> buffers,
- HloExecutionProfile* hlo_execution_profile);
+ Status ExecuteComputeFunction(const ExecutableRunOptions* run_options,
+ absl::Span<const se::DeviceMemoryBase> buffers,
+ HloExecutionProfile* hlo_execution_profile);
// Creates a ScopedShapedBuffer for holding the result of the computation,
// moving buffers out of allocated_buffers and into the result as appropriate.
// The addresses are set according to buffer assignment.
StatusOr<ScopedShapedBuffer> CreateResultShapedBuffer(
const ServiceExecutableRunOptions* run_options,
- tensorflow::gtl::MutableArraySlice<OwningDeviceMemory> buffers);
+ absl::Span<OwningDeviceMemory> buffers);
// Returns the points-to set of the root instruction of the entry
// computation. Uses points-to analysis from buffer assignment.
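
CreateResultShapedBuffer above illustrates the other half of the rename: MutableArraySlice<T> maps to absl::Span<T>, a view the callee may write through, while ArraySlice<T> maps to Span<const T>. A minimal sketch with a hypothetical FillWithZeros function:

#include <cstdint>
#include <vector>
#include "absl/types/span.h"

// MutableArraySlice<T> maps to Span<T>: the callee may write through the view.
void FillWithZeros(absl::Span<int64_t> xs) {
  for (int64_t& x : xs) x = 0;
}

void Caller() {
  std::vector<int64_t> buffers(8, 42);
  FillWithZeros(absl::MakeSpan(buffers));  // mutable spans are usually built
                                           // explicitly with absl::MakeSpan
}
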
diff --git a/tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.cc b/tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.cc
index 0df2abf001..5519a43b2f 100644
--- a/tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.cc
+++ b/tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.cc
@@ -179,7 +179,7 @@ Status CpuTransferManager::TransferLiteralFromOutfeed(
int64 size = GetByteSizeRequirement(literal_shape);
// Note: OSS build didn't like implicit conversion from
// literal_shape.dimensions() to the array slice on 2017-07-10.
- tensorflow::gtl::ArraySlice<int64> dimensions(
+ absl::Span<const int64> dimensions(
tensorflow::bit_cast<const int64*>(literal_shape.dimensions().data()),
literal_shape.dimensions().size());
TF_ASSIGN_OR_RETURN(
@@ -225,7 +225,7 @@ Status CpuTransferManager::TransferLiteralFromOutfeed(
StatusOr<Shape> CpuTransferManager::TransferTupleBuffersFromOutfeed(
se::StreamExecutor* executor,
- tensorflow::gtl::ArraySlice<std::pair<void*, int64>> buffer_data) {
+ absl::Span<const std::pair<void*, int64>> buffer_data) {
return TransferBuffersFromOutfeedInternal(executor, buffer_data,
/*is_tuple=*/true);
}
@@ -238,8 +238,7 @@ StatusOr<Shape> CpuTransferManager::TransferArrayBufferFromOutfeed(
StatusOr<Shape> CpuTransferManager::TransferBuffersFromOutfeedInternal(
se::StreamExecutor* executor,
- tensorflow::gtl::ArraySlice<std::pair<void*, int64>> buffer_data,
- bool is_tuple) {
+ absl::Span<const std::pair<void*, int64>> buffer_data, bool is_tuple) {
std::vector<std::unique_ptr<CpuOutfeedBuffer>> buffers;
for (auto b : buffer_data) {
int64 size = b.second;
diff --git a/tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.h b/tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.h
index 7b938e9fd7..6927edff86 100644
--- a/tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.h
+++ b/tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.h
@@ -56,7 +56,7 @@ class CpuTransferManager : public GenericTransferManager {
// Helper that transfers a tuple of element buffers from the device's outfeed.
StatusOr<Shape> TransferTupleBuffersFromOutfeed(
se::StreamExecutor* executor,
- tensorflow::gtl::ArraySlice<std::pair<void*, int64>> buffer_data);
+ absl::Span<const std::pair<void*, int64>> buffer_data);
// Helper that transfers an array buffer from the device's outfeed.
StatusOr<Shape> TransferArrayBufferFromOutfeed(se::StreamExecutor* executor,
@@ -68,8 +68,7 @@ class CpuTransferManager : public GenericTransferManager {
// for the given buffers.
StatusOr<Shape> TransferBuffersFromOutfeedInternal(
se::StreamExecutor* executor,
- tensorflow::gtl::ArraySlice<std::pair<void*, int64>> buffer_data,
- bool is_tuple);
+ absl::Span<const std::pair<void*, int64>> buffer_data, bool is_tuple);
TF_DISALLOW_COPY_AND_ASSIGN(CpuTransferManager);
};
diff --git a/tensorflow/compiler/xla/service/cpu/dot_op_emitter.cc b/tensorflow/compiler/xla/service/cpu/dot_op_emitter.cc
index dd060f54a2..99fa707c95 100644
--- a/tensorflow/compiler/xla/service/cpu/dot_op_emitter.cc
+++ b/tensorflow/compiler/xla/service/cpu/dot_op_emitter.cc
@@ -80,7 +80,7 @@ class MemoryTile {
// `minor_dim_offset`}.
//
// Note: `major_dim_offset` is a parameter to the constructor.
- void StoreTile(tensorflow::gtl::ArraySlice<llvm::Value*> tile,
+ void StoreTile(absl::Span<llvm::Value* const> tile,
llvm::Value* minor_dim_offset) const {
CHECK_EQ(tile.size(), pointers_.size());
for (int64 i = 0; i < pointers_.size(); i++) {
diff --git a/tensorflow/compiler/xla/service/cpu/ir_emitter.cc b/tensorflow/compiler/xla/service/cpu/ir_emitter.cc
index 1c828cc02c..7839d97317 100644
--- a/tensorflow/compiler/xla/service/cpu/ir_emitter.cc
+++ b/tensorflow/compiler/xla/service/cpu/ir_emitter.cc
@@ -506,8 +506,7 @@ Status IrEmitter::HandleTuple(HloInstruction* tuple) {
llvm::Value* IrEmitter::EmitElementalMap(
const HloMapInstruction& map_instr,
- tensorflow::gtl::ArraySlice<llvm::Value*> elemental_operands,
- absl::string_view name) {
+ absl::Span<llvm::Value* const> elemental_operands, absl::string_view name) {
return EmitThreadLocalCall(*map_instr.to_apply(), elemental_operands, name);
}
@@ -1455,7 +1454,7 @@ IrEmitter::EmitInnerLoopForVectorizedReduction(
const ReductionGenerator& reduction_generator,
const llvm_ir::IrArray::Index& output_index,
const ShardedVectorType& accumulator_type, HloInstruction* init_value,
- HloInstruction* arg, gtl::ArraySlice<int64> dimensions,
+ HloInstruction* arg, absl::Span<const int64> dimensions,
unsigned element_alignment) {
ShardedVector accumulator;
accumulator.reserve(accumulator_type.size());
@@ -1551,7 +1550,7 @@ void IrEmitter::EmitShardedVectorStore(
StatusOr<bool> IrEmitter::EmitVectorizedReduce(
HloInstruction* reduce, HloInstruction* arg, HloInstruction* init_value,
- gtl::ArraySlice<int64> dimensions, HloComputation* function,
+ absl::Span<const int64> dimensions, HloComputation* function,
string* failure_reason) {
if (!ReductionPreservesLayout(*reduce)) {
return false;
@@ -1701,7 +1700,7 @@ StatusOr<llvm::Value*> IrEmitter::EmitTargetElementLoopBodyForReduce(
HloReduceInstruction* reduce, const llvm_ir::IrArray::Index& index) {
const HloInstruction* arg = reduce->mutable_operand(0);
const HloInstruction* init_value = reduce->mutable_operand(1);
- gtl::ArraySlice<int64> dimensions(reduce->dimensions());
+ absl::Span<const int64> dimensions(reduce->dimensions());
// Initialize an accumulator with init_value.
PrimitiveType accumulator_type = reduce->shape().element_type();
@@ -1758,7 +1757,7 @@ Status IrEmitter::HandleReduce(HloInstruction* reduce) {
}
auto arg = reduce->mutable_operand(0);
auto init_value = reduce->mutable_operand(1);
- gtl::ArraySlice<int64> dimensions(reduce->dimensions());
+ absl::Span<const int64> dimensions(reduce->dimensions());
HloComputation* function = reduce->to_apply();
if (!options::VectorizedReduceDisabled(hlo_module_config_)) {
string vectorization_failure_reason;
@@ -2113,7 +2112,7 @@ Status IrEmitter::HandleCall(HloInstruction* call) {
}
Status IrEmitter::HandleCustomCall(HloInstruction* custom_call) {
- gtl::ArraySlice<HloInstruction*> operands(custom_call->operands());
+ absl::Span<HloInstruction* const> operands(custom_call->operands());
absl::string_view custom_call_target(custom_call->custom_call_target());
llvm::Type* i8_ptr_type = b_.getInt8PtrTy();
llvm::AllocaInst* operands_alloca =
@@ -2233,7 +2232,7 @@ Status IrEmitter::HandleWhile(HloInstruction* xla_while) {
}
StatusOr<bool> IrEmitter::EmitFastConcatenate(
- HloInstruction* concatenate, gtl::ArraySlice<HloInstruction*> operands,
+ HloInstruction* concatenate, absl::Span<HloInstruction* const> operands,
string* failure_reason) {
if (ShouldEmitParallelLoopFor(*concatenate)) {
*failure_reason =
@@ -2369,7 +2368,7 @@ void IrEmitter::EmitTransferElements(llvm::Value* target, llvm::Value* source,
}
Status IrEmitter::HandleConcatenate(HloInstruction* concatenate) {
- gtl::ArraySlice<HloInstruction*> operands(concatenate->operands());
+ absl::Span<HloInstruction* const> operands(concatenate->operands());
string failure_reason;
TF_ASSIGN_OR_RETURN(
bool successful,
@@ -2800,8 +2799,8 @@ Status IrEmitter::EmitMemcpy(const HloInstruction& source,
Status IrEmitter::ElementTypesSameAndSupported(
const HloInstruction& instruction,
- gtl::ArraySlice<const HloInstruction*> operands,
- gtl::ArraySlice<PrimitiveType> supported_types) {
+ absl::Span<const HloInstruction* const> operands,
+ absl::Span<const PrimitiveType> supported_types) {
for (auto operand : operands) {
TF_RET_CHECK(
ShapeUtil::SameElementType(operands[0]->shape(), operand->shape()));
@@ -2831,8 +2830,7 @@ Status IrEmitter::DefaultAction(HloInstruction* hlo) {
}
llvm::Value* IrEmitter::EmitThreadLocalCall(
- const HloComputation& callee,
- tensorflow::gtl::ArraySlice<llvm::Value*> parameters,
+ const HloComputation& callee, absl::Span<llvm::Value* const> parameters,
absl::string_view name) {
const Shape& return_shape = callee.root_instruction()->shape();
diff --git a/tensorflow/compiler/xla/service/cpu/ir_emitter.h b/tensorflow/compiler/xla/service/cpu/ir_emitter.h
index f98891246b..015724b65d 100644
--- a/tensorflow/compiler/xla/service/cpu/ir_emitter.h
+++ b/tensorflow/compiler/xla/service/cpu/ir_emitter.h
@@ -111,7 +111,7 @@ class IrEmitter : public DfsHloVisitorWithDefault,
// Emit code to map one element according to `map_instr`.
llvm::Value* EmitElementalMap(
const HloMapInstruction& map_instr,
- tensorflow::gtl::ArraySlice<llvm::Value*> elemental_operands,
+ absl::Span<llvm::Value* const> elemental_operands,
absl::string_view name);
protected:
@@ -252,10 +252,9 @@ class IrEmitter : public DfsHloVisitorWithDefault,
//
// `parameters` holds the *scalar values* that need to be passed to the
// callee. The return value is the scalar returned by the callee.
- llvm::Value* EmitThreadLocalCall(
- const HloComputation& callee,
- tensorflow::gtl::ArraySlice<llvm::Value*> parameters,
- absl::string_view name);
+ llvm::Value* EmitThreadLocalCall(const HloComputation& callee,
+ absl::Span<llvm::Value* const> parameters,
+ absl::string_view name);
// Emits a call to a "global" function (e.g. to the computation nested within
  // a kWhile or a kCall). Buffer assignment unambiguously assigns buffers to
@@ -271,8 +270,8 @@ class IrEmitter : public DfsHloVisitorWithDefault,
// match and are of one of the given supported types.
Status ElementTypesSameAndSupported(
const HloInstruction& instruction,
- tensorflow::gtl::ArraySlice<const HloInstruction*> operands,
- tensorflow::gtl::ArraySlice<PrimitiveType> supported_types);
+ absl::Span<const HloInstruction* const> operands,
+ absl::Span<const PrimitiveType> supported_types);
// Emit IR to perform a computation for every element in the given target op.
// This produces a series of nested loops (one for each dimension of the op's
@@ -319,10 +318,12 @@ class IrEmitter : public DfsHloVisitorWithDefault,
// concepts that generalize over other vectorizable operations. We should
// consider pulling out these abstractions into a VectorizingIrEmitter or
// something similar.
- StatusOr<bool> EmitVectorizedReduce(
- HloInstruction* reduce, HloInstruction* arg, HloInstruction* init_value,
- tensorflow::gtl::ArraySlice<int64> dimensions, HloComputation* function,
- string* failure_reason);
+ StatusOr<bool> EmitVectorizedReduce(HloInstruction* reduce,
+ HloInstruction* arg,
+ HloInstruction* init_value,
+ absl::Span<const int64> dimensions,
+ HloComputation* function,
+ string* failure_reason);
  // We'd like to keep one or two cache-lines' worth of data in registers
// without generating IR with illegal (e.g. excessively large or
@@ -372,16 +373,15 @@ class IrEmitter : public DfsHloVisitorWithDefault,
const ReductionGenerator& reduction_generator,
const llvm_ir::IrArray::Index& output_index,
const ShardedVectorType& accumulator_type, HloInstruction* init_value,
- HloInstruction* arg, tensorflow::gtl::ArraySlice<int64> dimensions,
+ HloInstruction* arg, absl::Span<const int64> dimensions,
unsigned element_alignment);
// Tries to emit a fast concatenate operation using memcpy. Returns true if
// successful, and false on failure. On failure, sets "failure_reason" to a
// string describing why it could not emit a fast concatenate.
- StatusOr<bool> EmitFastConcatenate(
- HloInstruction* concatenate,
- tensorflow::gtl::ArraySlice<HloInstruction*> operands,
- string* failure_reason);
+ StatusOr<bool> EmitFastConcatenate(HloInstruction* concatenate,
+ absl::Span<HloInstruction* const> operands,
+ string* failure_reason);
// Emits LLVM IR to transfer "element_count" elements of type "primitive_type"
// from the address "source" to the address "target".
diff --git a/tensorflow/compiler/xla/service/cpu/ir_function.cc b/tensorflow/compiler/xla/service/cpu/ir_function.cc
index 784045313d..3ecf4b69b7 100644
--- a/tensorflow/compiler/xla/service/cpu/ir_function.cc
+++ b/tensorflow/compiler/xla/service/cpu/ir_function.cc
@@ -200,10 +200,10 @@ llvm::Value* IrFunction::GetDynamicLoopBound(const int64 offset) {
// Returns an array of compute function call arguments (including parameter
// address buffer).
std::vector<llvm::Value*> GetArrayFunctionCallArguments(
- tensorflow::gtl::ArraySlice<llvm::Value*> parameter_addresses,
- llvm::IRBuilder<>* b, absl::string_view name,
- llvm::Value* return_value_buffer, llvm::Value* exec_run_options_arg,
- llvm::Value* temp_buffers_arg, llvm::Value* profile_counters_arg) {
+ absl::Span<llvm::Value* const> parameter_addresses, llvm::IRBuilder<>* b,
+ absl::string_view name, llvm::Value* return_value_buffer,
+ llvm::Value* exec_run_options_arg, llvm::Value* temp_buffers_arg,
+ llvm::Value* profile_counters_arg) {
llvm::Value* parameter_addresses_buffer;
if (parameter_addresses.empty()) {
diff --git a/tensorflow/compiler/xla/service/cpu/ir_function.h b/tensorflow/compiler/xla/service/cpu/ir_function.h
index ee7595f6e9..076ca219bc 100644
--- a/tensorflow/compiler/xla/service/cpu/ir_function.h
+++ b/tensorflow/compiler/xla/service/cpu/ir_function.h
@@ -115,10 +115,10 @@ class IrFunction {
// Returns an array of compute function call argument ir values.
std::vector<llvm::Value*> GetArrayFunctionCallArguments(
- tensorflow::gtl::ArraySlice<llvm::Value*> parameter_addresses,
- llvm::IRBuilder<>* b, absl::string_view name,
- llvm::Value* return_value_buffer, llvm::Value* exec_run_options_arg,
- llvm::Value* temp_buffers_arg, llvm::Value* profile_counters_arg);
+ absl::Span<llvm::Value* const> parameter_addresses, llvm::IRBuilder<>* b,
+ absl::string_view name, llvm::Value* return_value_buffer,
+ llvm::Value* exec_run_options_arg, llvm::Value* temp_buffers_arg,
+ llvm::Value* profile_counters_arg);
// Emits a call to a runtime fork/join function which dispatches parallel
// calls to 'parallel_function' (and joins threads before returning).
diff --git a/tensorflow/compiler/xla/service/cpu/vector_support_library.cc b/tensorflow/compiler/xla/service/cpu/vector_support_library.cc
index 962ea69c09..1bd4b59dd6 100644
--- a/tensorflow/compiler/xla/service/cpu/vector_support_library.cc
+++ b/tensorflow/compiler/xla/service/cpu/vector_support_library.cc
@@ -428,7 +428,7 @@ std::vector<llvm::Value*> TileVariable::Get() const {
return result;
}
-void TileVariable::Set(tensorflow::gtl::ArraySlice<llvm::Value*> value) {
+void TileVariable::Set(absl::Span<llvm::Value* const> value) {
CHECK_EQ(value.size(), storage_.size());
for (int64 i = 0, e = value.size(); i < e; i++) {
storage_[i].Set(value[i]);
diff --git a/tensorflow/compiler/xla/service/cpu/vector_support_library.h b/tensorflow/compiler/xla/service/cpu/vector_support_library.h
index c728f6df0a..3dfe941a3a 100644
--- a/tensorflow/compiler/xla/service/cpu/vector_support_library.h
+++ b/tensorflow/compiler/xla/service/cpu/vector_support_library.h
@@ -324,7 +324,7 @@ class TileVariable {
std::vector<llvm::Value*> initial_value);
std::vector<llvm::Value*> Get() const;
- void Set(tensorflow::gtl::ArraySlice<llvm::Value*> value);
+ void Set(absl::Span<llvm::Value* const> value);
private:
std::vector<VectorVariable> storage_;
diff --git a/tensorflow/compiler/xla/service/cpu/xfeed_manager.cc b/tensorflow/compiler/xla/service/cpu/xfeed_manager.cc
index 47543b2082..b9e47f5aad 100644
--- a/tensorflow/compiler/xla/service/cpu/xfeed_manager.cc
+++ b/tensorflow/compiler/xla/service/cpu/xfeed_manager.cc
@@ -37,7 +37,7 @@ void XfeedQueueManager::Reset() {
}
void XfeedQueueManager::EnqueueBuffersAtomically(
- tensorflow::gtl::ArraySlice<XfeedBuffer*> buffers) {
+ absl::Span<XfeedBuffer* const> buffers) {
tensorflow::mutex_lock l(mu_);
bool was_empty = enqueued_buffers_.empty();
for (XfeedBuffer* b : buffers) {
diff --git a/tensorflow/compiler/xla/service/cpu/xfeed_manager.h b/tensorflow/compiler/xla/service/cpu/xfeed_manager.h
index b4ace23260..fac1722b10 100644
--- a/tensorflow/compiler/xla/service/cpu/xfeed_manager.h
+++ b/tensorflow/compiler/xla/service/cpu/xfeed_manager.h
@@ -63,8 +63,7 @@ class XfeedQueueManager {
// called when the buffer will no longer be accessed by the XfeedManager,
// either as a result of a call to Reset or because the runtime has dequeued
// and used the buffer.
- void EnqueueBuffersAtomically(
- tensorflow::gtl::ArraySlice<XfeedBuffer*> buffers);
+ void EnqueueBuffersAtomically(absl::Span<XfeedBuffer* const> buffers);
// Blocks until the queue is non-empty, then returns the buffer at the head of
// the queue. Sets the current buffer to be the returned buffer. It is an
diff --git a/tensorflow/compiler/xla/service/device_memory_allocator.cc b/tensorflow/compiler/xla/service/device_memory_allocator.cc
index 1d0297cfbf..edbcb25247 100644
--- a/tensorflow/compiler/xla/service/device_memory_allocator.cc
+++ b/tensorflow/compiler/xla/service/device_memory_allocator.cc
@@ -25,7 +25,7 @@ namespace xla {
StreamExecutorMemoryAllocator::StreamExecutorMemoryAllocator(
const se::Platform* platform,
- tensorflow::gtl::ArraySlice<se::StreamExecutor*> stream_executors)
+ absl::Span<se::StreamExecutor* const> stream_executors)
: DeviceMemoryAllocator(platform),
stream_executors_(stream_executors.begin(), stream_executors.end()) {}
diff --git a/tensorflow/compiler/xla/service/device_memory_allocator.h b/tensorflow/compiler/xla/service/device_memory_allocator.h
index d87b86caf0..28a3539373 100644
--- a/tensorflow/compiler/xla/service/device_memory_allocator.h
+++ b/tensorflow/compiler/xla/service/device_memory_allocator.h
@@ -80,7 +80,7 @@ class StreamExecutorMemoryAllocator : public DeviceMemoryAllocator {
public:
StreamExecutorMemoryAllocator(
const se::Platform* platform,
- tensorflow::gtl::ArraySlice<se::StreamExecutor*> stream_executors);
+ absl::Span<se::StreamExecutor* const> stream_executors);
StatusOr<OwningDeviceMemory> Allocate(int device_ordinal, uint64 size,
bool retry_on_failure) override;
diff --git a/tensorflow/compiler/xla/service/elemental_ir_emitter.cc b/tensorflow/compiler/xla/service/elemental_ir_emitter.cc
index def42f9c77..4bb1e071d8 100644
--- a/tensorflow/compiler/xla/service/elemental_ir_emitter.cc
+++ b/tensorflow/compiler/xla/service/elemental_ir_emitter.cc
@@ -856,7 +856,7 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitErfInv(PrimitiveType prim_type,
auto getFloat = [&](const float f) {
return llvm::ConstantFP::get(b_->getFloatTy(), f);
};
- auto multiply_add = [&](tensorflow::gtl::ArraySlice<float> coefficients,
+ auto multiply_add = [&](absl::Span<const float> coefficients,
llvm::Value* w) {
llvm::Value* p = getFloat(coefficients.front());
coefficients.remove_prefix(1);
@@ -893,7 +893,7 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitErfInv(PrimitiveType prim_type,
SetToFirstInsertPoint(if_data.true_block, b_);
{
llvm::Value* lw = FSub(w, getFloat(2.5f));
- tensorflow::gtl::ArraySlice<float> lq{
+ absl::Span<const float> lq{
2.81022636e-08f, 3.43273939e-07f, -3.5233877e-06f,
-4.39150654e-06f, 0.00021858087f, -0.00125372503f,
-0.00417768164f, 0.246640727f, 1.50140941f};
@@ -908,7 +908,7 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitErfInv(PrimitiveType prim_type,
module_, llvm::Intrinsic::sqrt, {b_->getFloatTy()});
llvm::Value* gw = FSub(Call(sqrtf_fn, w), getFloat(3.0f));
- tensorflow::gtl::ArraySlice<float> gq{
+ absl::Span<const float> gq{
-0.000200214257f, 0.000100950558f, 0.00134934322f,
-0.00367342844f, 0.00573950773f, -0.0076224613f,
0.00943887047f, 1.00167406f, 2.83297682f};
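As an aside on the hunk above: absl::Span keeps ArraySlice's `remove_prefix`, so the coefficient-walking pattern in `multiply_add` carries over unchanged. A minimal sketch of the same fused multiply-add chain on host floats (the IR-emission machinery is omitted):

#include <cstdio>
#include "absl/types/span.h"

// Horner-style evaluation, mirroring the multiply_add lambda but on plain floats.
float MultiplyAdd(absl::Span<const float> coefficients, float w) {
  float p = coefficients.front();
  coefficients.remove_prefix(1);  // spans support remove_prefix, just like ArraySlice
  for (float c : coefficients) {
    p = p * w + c;
  }
  return p;
}

int main() {
  // p(w) = 2*w^2 + 3*w + 1, evaluated at w = 2 -> 15.
  std::printf("%f\n", MultiplyAdd({2.0f, 3.0f, 1.0f}, 2.0f));
}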
diff --git a/tensorflow/compiler/xla/service/elemental_ir_emitter_test.cc b/tensorflow/compiler/xla/service/elemental_ir_emitter_test.cc
index 5ab0756219..1b3be199f6 100644
--- a/tensorflow/compiler/xla/service/elemental_ir_emitter_test.cc
+++ b/tensorflow/compiler/xla/service/elemental_ir_emitter_test.cc
@@ -28,8 +28,7 @@ using absl::nullopt;
class ElementalIrEmitterExecutionTest : public HloTestBase {
protected:
- void RunTest(const string& hlo_text,
- tensorflow::gtl::ArraySlice<Literal*> args) {
+ void RunTest(const string& hlo_text, absl::Span<Literal* const> args) {
HloModuleConfig config;
config.set_debug_options(GetDebugOptionsForTest());
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
diff --git a/tensorflow/compiler/xla/service/executable.cc b/tensorflow/compiler/xla/service/executable.cc
index 78edf918a4..47c56e2f7f 100644
--- a/tensorflow/compiler/xla/service/executable.cc
+++ b/tensorflow/compiler/xla/service/executable.cc
@@ -26,13 +26,12 @@ limitations under the License.
#include "tensorflow/core/lib/strings/proto_serialization.h"
#include "tensorflow/core/platform/env.h"
-using tensorflow::gtl::ArraySlice;
namespace xla {
StatusOr<std::vector<ScopedShapedBuffer>> Executable::ExecuteOnStreams(
- ArraySlice<const ServiceExecutableRunOptions> run_options,
- ArraySlice<ArraySlice<const ShapedBuffer*>> arguments) {
+ absl::Span<const ServiceExecutableRunOptions> run_options,
+ absl::Span<const absl::Span<const ShapedBuffer* const>> arguments) {
TF_RET_CHECK(run_options.size() == arguments.size());
std::vector<ScopedShapedBuffer> return_values;
@@ -63,7 +62,7 @@ StatusOr<std::vector<ScopedShapedBuffer>> Executable::ExecuteOnStreams(
StatusOr<ScopedShapedBuffer> Executable::ExecuteOnStreamWrapper(
const ServiceExecutableRunOptions* run_options, ExecutionProfile* profile,
- ArraySlice<const ShapedBuffer*> arguments) {
+ absl::Span<const ShapedBuffer* const> arguments) {
se::Stream* stream = run_options->stream();
std::unique_ptr<se::Timer> timer;
if (profile != nullptr) {
diff --git a/tensorflow/compiler/xla/service/executable.h b/tensorflow/compiler/xla/service/executable.h
index 6e055edc03..4b8d955b28 100644
--- a/tensorflow/compiler/xla/service/executable.h
+++ b/tensorflow/compiler/xla/service/executable.h
@@ -81,14 +81,14 @@ class Executable {
// Returns a shaped buffer containing the result of the computation.
virtual StatusOr<ScopedShapedBuffer> ExecuteOnStream(
const ServiceExecutableRunOptions* run_options,
- tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments,
+ absl::Span<const ShapedBuffer* const> arguments,
HloExecutionProfile* hlo_execution_profile) = 0;
// Same as ExecuteOnStream(), but this call is non-blocking and returns as
// soon as all of the operations are enqueued for launch on the stream.
virtual StatusOr<ScopedShapedBuffer> ExecuteAsyncOnStream(
const ServiceExecutableRunOptions* run_options,
- tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments) = 0;
+ absl::Span<const ShapedBuffer* const> arguments) = 0;
// Starts the given program executing on the given stream/executor.
//
@@ -119,11 +119,8 @@ class Executable {
// run_options[i]->stream() and the returned value is at index i of the
// returned vector.
virtual StatusOr<std::vector<ScopedShapedBuffer>> ExecuteOnStreams(
- tensorflow::gtl::ArraySlice<const ServiceExecutableRunOptions>
- run_options,
- tensorflow::gtl::ArraySlice<
- tensorflow::gtl::ArraySlice<const ShapedBuffer*>>
- arguments);
+ absl::Span<const ServiceExecutableRunOptions> run_options,
+ absl::Span<const absl::Span<const ShapedBuffer* const>> arguments);
// Populates `hlo_execution_profile` from `executor`. This is implicit in any
// Execute* API call that takes a hlo_execution_profile argument, but must be
@@ -139,7 +136,7 @@ class Executable {
// given ExecutionProfile if non-null.
StatusOr<ScopedShapedBuffer> ExecuteOnStreamWrapper(
const ServiceExecutableRunOptions* run_options, ExecutionProfile* profile,
- tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments);
+ absl::Span<const ShapedBuffer* const> arguments);
// Returns the ExecutionProfile from executing on the device. This includes
// the number of cycles taken for the computation or the compilation time.
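A hedged sketch of how a caller might assemble the nested-span argument type that ExecuteOnStreams now takes; `Buffer` stands in for ShapedBuffer and the run options are elided:

#include <cstddef>
#include <vector>
#include "absl/types/span.h"

struct Buffer {};  // stand-in for ShapedBuffer

// Mirrors the nested parameter: one span of arguments per stream.
std::size_t CountArguments(
    absl::Span<const absl::Span<const Buffer* const>> per_stream_args) {
  std::size_t total = 0;
  for (absl::Span<const Buffer* const> args : per_stream_args) total += args.size();
  return total;
}

int main() {
  Buffer b0, b1, b2;
  std::vector<const Buffer*> stream0_args = {&b0, &b1};
  std::vector<const Buffer*> stream1_args = {&b2};

  // The outer span cannot view the vectors directly; an intermediate container
  // of spans provides contiguous elements of the right type.
  std::vector<absl::Span<const Buffer* const>> nested = {stream0_args, stream1_args};
  return CountArguments(nested) == 3 ? 0 : 1;
}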
diff --git a/tensorflow/compiler/xla/service/gather_expander.cc b/tensorflow/compiler/xla/service/gather_expander.cc
index 3f1a881372..cb86c98579 100644
--- a/tensorflow/compiler/xla/service/gather_expander.cc
+++ b/tensorflow/compiler/xla/service/gather_expander.cc
@@ -25,7 +25,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/util.h"
namespace xla {
-using tensorflow::gtl::ArraySlice;
static StatusOr<HloInstruction*> TransposeIndexVectorDimToLast(
HloInstruction* start_indices, int64 index_vector_dim) {
@@ -225,7 +224,7 @@ static StatusOr<std::vector<HloInstruction*>> GatherLoopBody(
static StatusOr<HloInstruction*> CreateGatherLoopAccumulatorInitValue(
HloComputation* computation, PrimitiveType element_type,
- ArraySlice<int64> slice_sizes, int64 gather_loop_trip_count,
+ absl::Span<const int64> slice_sizes, int64 gather_loop_trip_count,
const GatherDimensionNumbers& dim_numbers) {
std::vector<int64> accumulator_state_shape_dims;
accumulator_state_shape_dims.reserve(1 + slice_sizes.size());
@@ -244,7 +243,7 @@ static StatusOr<HloInstruction*> CreateGatherLoopAccumulatorInitValue(
// are the major dimensions and the offset dimensions are the minor dimensions.
// Fix this up with a transpose.
static StatusOr<HloInstruction*> PermuteBatchAndOffsetDims(
- HloInstruction* accumulator, ArraySlice<int64> offset_dims,
+ HloInstruction* accumulator, absl::Span<const int64> offset_dims,
int64 output_rank) {
std::vector<int64> permutation;
permutation.reserve(output_rank);
diff --git a/tensorflow/compiler/xla/service/generic_transfer_manager.cc b/tensorflow/compiler/xla/service/generic_transfer_manager.cc
index 0ce2db907b..4ed91ef187 100644
--- a/tensorflow/compiler/xla/service/generic_transfer_manager.cc
+++ b/tensorflow/compiler/xla/service/generic_transfer_manager.cc
@@ -42,8 +42,7 @@ se::Platform::Id GenericTransferManager::PlatformId() const {
}
Status GenericTransferManager::WriteSingleTupleIndexTable(
- se::Stream* stream,
- tensorflow::gtl::ArraySlice<se::DeviceMemoryBase> elements,
+ se::Stream* stream, absl::Span<const se::DeviceMemoryBase> elements,
const Shape& shape, se::DeviceMemoryBase* region) {
TF_RET_CHECK(elements.size() == ShapeUtil::TupleElementCount(shape));
@@ -163,7 +162,7 @@ Status GenericTransferManager::TransferLiteralFromOutfeed(
}
Status GenericTransferManager::ResetDevices(
- tensorflow::gtl::ArraySlice<se::StreamExecutor*>
+ absl::Span<se::StreamExecutor* const>
/*executors*/) {
return Unimplemented(
"Device reset is not yet supported on this platform (b/30481585)");
diff --git a/tensorflow/compiler/xla/service/generic_transfer_manager.h b/tensorflow/compiler/xla/service/generic_transfer_manager.h
index 6c1a21587a..86c8b1c145 100644
--- a/tensorflow/compiler/xla/service/generic_transfer_manager.h
+++ b/tensorflow/compiler/xla/service/generic_transfer_manager.h
@@ -55,15 +55,13 @@ class GenericTransferManager : public TransferManager {
const Shape& literal_shape,
MutableBorrowingLiteral literal) override;
- Status ResetDevices(
- tensorflow::gtl::ArraySlice<se::StreamExecutor*> executors) override;
+ Status ResetDevices(absl::Span<se::StreamExecutor* const> executors) override;
int64 GetByteSizeRequirement(const Shape& shape) const override;
protected:
Status WriteSingleTupleIndexTable(
- se::Stream* stream,
- tensorflow::gtl::ArraySlice<se::DeviceMemoryBase> elements,
+ se::Stream* stream, absl::Span<const se::DeviceMemoryBase> elements,
const Shape& shape, se::DeviceMemoryBase* region) override;
private:
diff --git a/tensorflow/compiler/xla/service/gpu/cudnn_convolution_algorithm_picker.cc b/tensorflow/compiler/xla/service/gpu/cudnn_convolution_algorithm_picker.cc
index dbdf8e7a0e..2af31a52f9 100644
--- a/tensorflow/compiler/xla/service/gpu/cudnn_convolution_algorithm_picker.cc
+++ b/tensorflow/compiler/xla/service/gpu/cudnn_convolution_algorithm_picker.cc
@@ -204,9 +204,8 @@ CudnnConvolutionAlgorithmPicker::PickBestAlgorithm(
if (allocator_ != nullptr) {
allocator = allocator_;
} else {
- se_allocator.emplace(
- stream_exec_->platform(),
- tensorflow::gtl::ArraySlice<se::StreamExecutor*>({stream_exec_}));
+ se_allocator.emplace(stream_exec_->platform(),
+ absl::Span<se::StreamExecutor* const>({stream_exec_}));
allocator = &*se_allocator;
}
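An aside on the call site above: a bare braced list such as `{stream_exec_}` cannot be forwarded through `emplace`, because braced-init-lists do not take part in template argument deduction, which is presumably why the span type is spelled out explicitly. The resulting span views a one-element temporary that only has to outlive the constructor call, and the allocator copies it anyway. A minimal stand-alone illustration with stand-in types:

#include <vector>
#include "absl/types/optional.h"
#include "absl/types/span.h"

struct Platform {};  // stand-in for se::Platform
struct Executor {};  // stand-in for se::StreamExecutor

struct Allocator {
  Allocator(const Platform* platform, absl::Span<Executor* const> executors)
      : platform(platform), executors(executors.begin(), executors.end()) {}
  const Platform* platform;
  std::vector<Executor*> executors;
};

int main() {
  Platform platform;
  Executor exec;
  absl::optional<Allocator> allocator;

  // allocator.emplace(&platform, {&exec});  // does not compile: the list can't be deduced
  allocator.emplace(&platform,
                    absl::Span<Executor* const>({&exec}));  // spelled out, as in the diff
  return allocator->executors.size() == 1 ? 0 : 1;
}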
diff --git a/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.cc b/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.cc
index 57a3a43a6f..c1aaa4bf04 100644
--- a/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.cc
+++ b/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.cc
@@ -74,10 +74,8 @@ GpuElementalIrEmitter::GpuElementalIrEmitter(
compute_nested_(std::move(compute_nested)) {}
StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitLibdeviceMathCall(
- const string& callee_name,
- tensorflow::gtl::ArraySlice<llvm::Value*> operands,
- tensorflow::gtl::ArraySlice<PrimitiveType> input_types,
- PrimitiveType output_type) {
+ const string& callee_name, absl::Span<llvm::Value* const> operands,
+ absl::Span<const PrimitiveType> input_types, PrimitiveType output_type) {
// The libdevice math functions differentiate between "double" and "float" by
// appending an 'f' to the function's name. libdevice doesn't have f16 math
// functions, so we convert the operands to f32 before calling the function
@@ -119,10 +117,8 @@ StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitLibdeviceMathCall(
}
StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitLlvmIntrinsicMathCall(
- const string& callee_name,
- tensorflow::gtl::ArraySlice<llvm::Value*> operands,
- tensorflow::gtl::ArraySlice<PrimitiveType> input_types,
- PrimitiveType output_type) {
+ const string& callee_name, absl::Span<llvm::Value* const> operands,
+ absl::Span<const PrimitiveType> input_types, PrimitiveType output_type) {
// llvm intrinsics differentiate between half/float/double functions via
// the suffixes ".f16", ".f32" and ".f64".
string munged_callee = callee_name;
@@ -144,10 +140,8 @@ StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitLlvmIntrinsicMathCall(
}
StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitMathCall(
- const string& callee_name,
- tensorflow::gtl::ArraySlice<llvm::Value*> operands,
- tensorflow::gtl::ArraySlice<PrimitiveType> input_types,
- PrimitiveType output_type) {
+ const string& callee_name, absl::Span<llvm::Value* const> operands,
+ absl::Span<const PrimitiveType> input_types, PrimitiveType output_type) {
// Binary math functions are of type [T] -> T.
for (PrimitiveType input_type : input_types) {
if (output_type != input_type) {
@@ -290,11 +284,9 @@ StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitTanh(PrimitiveType prim_type,
}
llvm::Value* GpuElementalIrEmitter::EmitDeviceFunctionCall(
- const string& callee_name,
- tensorflow::gtl::ArraySlice<llvm::Value*> operands,
- tensorflow::gtl::ArraySlice<PrimitiveType> input_types,
- PrimitiveType output_type,
- tensorflow::gtl::ArraySlice<llvm::Attribute::AttrKind> attributes) {
+ const string& callee_name, absl::Span<llvm::Value* const> operands,
+ absl::Span<const PrimitiveType> input_types, PrimitiveType output_type,
+ absl::Span<const llvm::Attribute::AttrKind> attributes) {
std::vector<llvm::Type*> ir_input_types;
for (PrimitiveType input_type : input_types) {
ir_input_types.push_back(
diff --git a/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.h b/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.h
index 91942785d2..43f1f208bf 100644
--- a/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.h
+++ b/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.h
@@ -38,9 +38,9 @@ namespace gpu {
class GpuElementalIrEmitter : public ElementalIrEmitter {
public:
// A NestedComputer computes an element of the output of the given computation
- // given an ArraySlice of its input elements.
+ // given a Span of its input elements.
using NestedComputer = std::function<StatusOr<llvm::Value*>(
- const HloComputation&, tensorflow::gtl::ArraySlice<llvm::Value*>)>;
+ const HloComputation&, absl::Span<llvm::Value* const>)>;
GpuElementalIrEmitter(const HloModuleConfig& hlo_module_config,
llvm::Module* module, llvm::IRBuilder<>* b,
@@ -96,37 +96,29 @@ class GpuElementalIrEmitter : public ElementalIrEmitter {
// Emits IR to call a device function named "callee_name" on the given
// operand. Returns the IR value that represents the return value.
llvm::Value* EmitDeviceFunctionCall(
- const string& callee_name,
- tensorflow::gtl::ArraySlice<llvm::Value*> operands,
- tensorflow::gtl::ArraySlice<PrimitiveType> input_type,
- PrimitiveType output_type,
- tensorflow::gtl::ArraySlice<llvm::Attribute::AttrKind> attributes);
+ const string& callee_name, absl::Span<llvm::Value* const> operands,
+ absl::Span<const PrimitiveType> input_type, PrimitiveType output_type,
+ absl::Span<const llvm::Attribute::AttrKind> attributes);
// Emits IR to call an LLVM intrinsic of type [T] -> T. Adjusts
// callee_name according to T. Returns the IR value that represents the
// return value of the function.
StatusOr<llvm::Value*> EmitLlvmIntrinsicMathCall(
- const string& callee_name,
- tensorflow::gtl::ArraySlice<llvm::Value*> operands,
- tensorflow::gtl::ArraySlice<PrimitiveType> input_types,
- PrimitiveType output_type);
+ const string& callee_name, absl::Span<llvm::Value* const> operands,
+ absl::Span<const PrimitiveType> input_types, PrimitiveType output_type);
// Emits IR to call a libdevice function of type [T] -> T. Adjusts
// callee_name according to T. Returns the IR value that represents the
// return value of the function.
StatusOr<llvm::Value*> EmitLibdeviceMathCall(
- const string& callee_name,
- tensorflow::gtl::ArraySlice<llvm::Value*> operands,
- tensorflow::gtl::ArraySlice<PrimitiveType> input_types,
- PrimitiveType output_type);
+ const string& callee_name, absl::Span<llvm::Value* const> operands,
+ absl::Span<const PrimitiveType> input_types, PrimitiveType output_type);
// Emits IR to call a function of type [T] -> T. Does not munge callee_name.
// Returns the IR value that represents the return value of the function.
StatusOr<llvm::Value*> EmitMathCall(
- const string& callee_name,
- tensorflow::gtl::ArraySlice<llvm::Value*> operands,
- tensorflow::gtl::ArraySlice<PrimitiveType> input_types,
- PrimitiveType output_type);
+ const string& callee_name, absl::Span<llvm::Value* const> operands,
+ absl::Span<const PrimitiveType> input_types, PrimitiveType output_type);
const HloModuleConfig& hlo_module_config_;
NestedComputer compute_nested_;
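As an aside on the NestedComputer alias above: a std::function can take the span parameter by value, and callers may pass any contiguous container of pointers. A hedged sketch with stand-in types (the LLVM and StatusOr dependencies are dropped for brevity):

#include <functional>
#include <vector>
#include "absl/types/span.h"

struct Value { int v; };  // stand-in for llvm::Value
struct Computation {};    // stand-in for HloComputation

// Shape of the new alias, with the XLA-specific types replaced by stand-ins.
using NestedComputer =
    std::function<Value*(const Computation&, absl::Span<Value* const>)>;

int main() {
  NestedComputer compute = [](const Computation&,
                              absl::Span<Value* const> operands) -> Value* {
    return operands.empty() ? nullptr : operands.front();
  };

  Computation computation;
  Value a{1}, b{2};
  std::vector<Value*> operands = {&a, &b};
  Value* result = compute(computation, operands);  // the vector converts to the span
  return result == &a ? 0 : 1;
}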
diff --git a/tensorflow/compiler/xla/service/gpu/fft_thunk.cc b/tensorflow/compiler/xla/service/gpu/fft_thunk.cc
index 11549cdac5..ca4a605af5 100644
--- a/tensorflow/compiler/xla/service/gpu/fft_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/fft_thunk.cc
@@ -92,8 +92,7 @@ string FftTypeToString(se::fft::Type type) {
} // namespace
-FftThunk::FftThunk(FftType fft_type,
- tensorflow::gtl::ArraySlice<int64> fft_length,
+FftThunk::FftThunk(FftType fft_type, absl::Span<const int64> fft_length,
const BufferAllocation::Slice& input_buffer,
const BufferAllocation::Slice& output_buffer,
const Shape& input_shape, const Shape& output_shape,
diff --git a/tensorflow/compiler/xla/service/gpu/fft_thunk.h b/tensorflow/compiler/xla/service/gpu/fft_thunk.h
index 4adec7ee54..2be50e08bd 100644
--- a/tensorflow/compiler/xla/service/gpu/fft_thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/fft_thunk.h
@@ -62,7 +62,7 @@ class FftThunk : public Thunk {
public:
// Constructs a thunk for launching an FFT on a stream.
// Semantics of null hlo_instruction argument are as in Thunk.
- FftThunk(FftType fft_type, tensorflow::gtl::ArraySlice<int64> fft_length,
+ FftThunk(FftType fft_type, absl::Span<const int64> fft_length,
const BufferAllocation::Slice& input_buffer,
const BufferAllocation::Slice& output_buffer,
const Shape& input_shape, const Shape& output_shape,
diff --git a/tensorflow/compiler/xla/service/gpu/gpu_executable.cc b/tensorflow/compiler/xla/service/gpu/gpu_executable.cc
index 71a02e70df..31a9f9b1be 100644
--- a/tensorflow/compiler/xla/service/gpu/gpu_executable.cc
+++ b/tensorflow/compiler/xla/service/gpu/gpu_executable.cc
@@ -234,7 +234,7 @@ GpuExecutable::ResolveConstantGlobals(se::StreamExecutor* executor) {
StatusOr<ScopedShapedBuffer> GpuExecutable::ExecuteOnStream(
const ServiceExecutableRunOptions* run_options,
- tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments,
+ absl::Span<const ShapedBuffer* const> arguments,
HloExecutionProfile* hlo_execution_profile) {
DeviceMemoryAllocator* memory_allocator = run_options->allocator();
@@ -325,7 +325,7 @@ StatusOr<ScopedShapedBuffer> GpuExecutable::ExecuteOnStream(
StatusOr<ScopedShapedBuffer> GpuExecutable::ExecuteAsyncOnStream(
const ServiceExecutableRunOptions* run_options,
- tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments) {
+ absl::Span<const ShapedBuffer* const> arguments) {
// TODO(b/30671675): Implement asynchronous execution mode.
return Unimplemented(
"Asynchronous execution on stream is not yet supported on GPU.");
diff --git a/tensorflow/compiler/xla/service/gpu/gpu_executable.h b/tensorflow/compiler/xla/service/gpu/gpu_executable.h
index 627a05e240..b3765adf5e 100644
--- a/tensorflow/compiler/xla/service/gpu/gpu_executable.h
+++ b/tensorflow/compiler/xla/service/gpu/gpu_executable.h
@@ -78,12 +78,12 @@ class GpuExecutable : public Executable {
// match the compute capability passed to this object's constructor.
StatusOr<ScopedShapedBuffer> ExecuteOnStream(
const ServiceExecutableRunOptions* run_options,
- tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments,
+ absl::Span<const ShapedBuffer* const> arguments,
HloExecutionProfile* hlo_execution_profile) override;
StatusOr<ScopedShapedBuffer> ExecuteAsyncOnStream(
const ServiceExecutableRunOptions* run_options,
- tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments) override;
+ absl::Span<const ShapedBuffer* const> arguments) override;
private:
// If `block_host_until_done` is false, execution will not block the host
diff --git a/tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.cc b/tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.cc
index 0e205b9c02..51627402b4 100644
--- a/tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.cc
+++ b/tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.cc
@@ -35,8 +35,8 @@ using absl::StrAppend;
using absl::StrCat;
void HloToIrBindings::EmitBasePointersForHlos(
- tensorflow::gtl::ArraySlice<const HloInstruction*> io_hlos,
- tensorflow::gtl::ArraySlice<const HloInstruction*> non_io_hlos) {
+ absl::Span<const HloInstruction* const> io_hlos,
+ absl::Span<const HloInstruction* const> non_io_hlos) {
// I/O HLOs are bound to the arguments of the current IR function. I.e.,
//
// void IrFunction(io_0, io_1, ..., io_{m-1}, temp_buffer_base) {
diff --git a/tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.h b/tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.h
index eee40b0e91..5b05ed812e 100644
--- a/tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.h
+++ b/tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.h
@@ -45,8 +45,8 @@ class HloToIrBindings {
alias_analysis_(module, *buffer_assignment_, &b_->getContext()) {}
void EmitBasePointersForHlos(
- tensorflow::gtl::ArraySlice<const HloInstruction*> io_hlos,
- tensorflow::gtl::ArraySlice<const HloInstruction*> non_io_hlos);
+ absl::Span<const HloInstruction* const> io_hlos,
+ absl::Span<const HloInstruction* const> non_io_hlos);
// Rebinds the given HLO to the LLVM IR value that represent its address.
void BindHloToIrValue(const HloInstruction& hlo, llvm::Value* ir_value,
diff --git a/tensorflow/compiler/xla/service/gpu/ir_emission_utils.cc b/tensorflow/compiler/xla/service/gpu/ir_emission_utils.cc
index f544bcc919..9c90f4d46b 100644
--- a/tensorflow/compiler/xla/service/gpu/ir_emission_utils.cc
+++ b/tensorflow/compiler/xla/service/gpu/ir_emission_utils.cc
@@ -216,7 +216,7 @@ bool IsReductionToVector(const HloInstruction& reduce) {
// "i32 vprintf(i8* fmt, arguments_type* arguments)" in the driver; see
// http://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability/index.html#system-calls
llvm::Value* EmitPrintf(absl::string_view fmt,
- tensorflow::gtl::ArraySlice<llvm::Value*> arguments,
+ absl::Span<llvm::Value* const> arguments,
llvm::IRBuilder<>* builder) {
std::vector<llvm::Type*> argument_types;
for (auto argument : arguments) {
diff --git a/tensorflow/compiler/xla/service/gpu/ir_emission_utils.h b/tensorflow/compiler/xla/service/gpu/ir_emission_utils.h
index a35e250101..d242897e16 100644
--- a/tensorflow/compiler/xla/service/gpu/ir_emission_utils.h
+++ b/tensorflow/compiler/xla/service/gpu/ir_emission_utils.h
@@ -127,7 +127,7 @@ bool IsReductionToVector(const HloInstruction& reduce);
// Emits call to "vprintf" with given format and arguments.
llvm::Value* EmitPrintf(absl::string_view fmt,
- tensorflow::gtl::ArraySlice<llvm::Value*> arguments,
+ absl::Span<llvm::Value* const> arguments,
llvm::IRBuilder<>* builder);
// Emits code to shuffle data between threads of a warp. This has the same
diff --git a/tensorflow/compiler/xla/service/gpu/ir_emitter.cc b/tensorflow/compiler/xla/service/gpu/ir_emitter.cc
index bdf6aadde6..ffca5d6549 100644
--- a/tensorflow/compiler/xla/service/gpu/ir_emitter.cc
+++ b/tensorflow/compiler/xla/service/gpu/ir_emitter.cc
@@ -141,7 +141,7 @@ Status IrEmitter::HandleTuple(HloInstruction* tuple) {
Status IrEmitter::EmitCallToNestedComputation(
const HloComputation& nested_computation,
- tensorflow::gtl::ArraySlice<llvm::Value*> operands, llvm::Value* output) {
+ absl::Span<llvm::Value* const> operands, llvm::Value* output) {
TF_RET_CHECK(nested_computation.num_parameters() > 0);
llvm::Function*& emitted_function =
computation_to_ir_function_[&nested_computation];
@@ -633,7 +633,7 @@ Status IrEmitter::HandleReduce(HloInstruction* reduce) {
}
auto arg = reduce->operand(0);
auto init_value = reduce->operand(1);
- tensorflow::gtl::ArraySlice<int64> dimensions(reduce->dimensions());
+ absl::Span<const int64> dimensions(reduce->dimensions());
HloComputation* function = reduce->to_apply();
return EmitTargetElementLoop(
*reduce,
@@ -748,7 +748,7 @@ Status IrEmitter::HandleBatchNormGrad(HloInstruction*) {
StatusOr<llvm::Value*> IrEmitter::ComputeNestedElement(
const HloComputation& computation,
- tensorflow::gtl::ArraySlice<llvm::Value*> parameter_elements) {
+ absl::Span<llvm::Value* const> parameter_elements) {
llvm::Value* return_buffer = llvm_ir::EmitAllocaAtFunctionEntry(
llvm_ir::PrimitiveTypeToIrType(
computation.root_instruction()->shape().element_type(), module_),
diff --git a/tensorflow/compiler/xla/service/gpu/ir_emitter.h b/tensorflow/compiler/xla/service/gpu/ir_emitter.h
index 3673b9f58d..bc2b04ace5 100644
--- a/tensorflow/compiler/xla/service/gpu/ir_emitter.h
+++ b/tensorflow/compiler/xla/service/gpu/ir_emitter.h
@@ -143,9 +143,9 @@ class IrEmitter : public DfsHloVisitorWithDefault,
// Emits a call in IR to the given nested computation with the given operands
// and output. If no IR function has been previously emitted for the
// computation, also emits such a function.
- Status EmitCallToNestedComputation(
- const HloComputation& nested_computation,
- tensorflow::gtl::ArraySlice<llvm::Value*> operands, llvm::Value* output);
+ Status EmitCallToNestedComputation(const HloComputation& nested_computation,
+ absl::Span<llvm::Value* const> operands,
+ llvm::Value* output);
// Emits an atomic operation that implements `nested_computation` in the
// sequentially consistent memory model. `output_address` and `source_address`
@@ -199,7 +199,7 @@ class IrEmitter : public DfsHloVisitorWithDefault,
StatusOr<llvm::Value*> ComputeNestedElement(
const HloComputation& computation,
- tensorflow::gtl::ArraySlice<llvm::Value*> parameter_elements);
+ absl::Span<llvm::Value* const> parameter_elements);
// Emits an atomic operation that implements `nested_computation` in the
// sequentially consistent memory model. `output_address` and `source_address`
diff --git a/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc b/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc
index 860dd0b50f..3ab79197e2 100644
--- a/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc
+++ b/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc
@@ -94,7 +94,6 @@ using absl::optional;
using absl::StrCat;
using llvm_ir::IrArray;
using llvm_ir::IrName;
-using tensorflow::gtl::ArraySlice;
// If a dimension is smaller than this, untiled transposition may be more
// efficient.
@@ -176,7 +175,7 @@ Status IrEmitterUnnested::Postprocess(HloInstruction* hlo) {
llvm::Function* IrEmitterUnnested::BuildKernelPrototype(
const HloInstruction& inst,
- tensorflow::gtl::ArraySlice<const BufferAllocation*> args) {
+ absl::Span<const BufferAllocation* const> args) {
// Compute the kernel name. The opcode string may contain "-" which cannot be
// in a PTX function name, so sanitize the name before uniquifying it.
string kernel_name = ir_emitter_context_->name_uniquer()->GetUniqueName(
@@ -556,10 +555,10 @@ Status IrEmitterUnnested::HandleFusion(HloInstruction* fusion) {
}
VLOG(3) << "Emitting fused reduction to vector: " << fusion->ToString();
std::vector<std::unique_ptr<Thunk>> thunks;
- ArraySlice<HloInstruction*> output_instructions =
+ absl::Span<HloInstruction* const> output_instructions =
root->opcode() == HloOpcode::kTuple
? root->operands()
- : ArraySlice<HloInstruction*>(&root, 1);
+ : absl::Span<HloInstruction* const>(&root, 1);
// For multi-output fusion emit an initializer for each tuple element.
// Otherwise it's sufficient to just initialize the single output.
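An aside on the hunk above: the pointer-plus-length constructor builds a one-element view over an existing lvalue, which lets both branches of the conditional yield the same `absl::Span<HloInstruction* const>` type. A small stand-alone sketch:

#include <vector>
#include "absl/types/span.h"

struct Instr { std::vector<Instr*> operands; };  // stand-in for HloInstruction

int main() {
  Instr leaf0, leaf1;
  Instr tuple;
  tuple.operands = {&leaf0, &leaf1};

  Instr* root = &tuple;
  bool is_tuple = !root->operands.empty();

  // Both branches produce the same span type; the one-element case views the
  // local `root` variable itself, so it is only valid while `root` is in scope.
  absl::Span<Instr* const> outputs =
      is_tuple ? absl::Span<Instr* const>(root->operands)
               : absl::Span<Instr* const>(&root, 1);

  return outputs.size() == 2 ? 0 : 1;
}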
@@ -718,8 +717,7 @@ Status IrEmitterUnnested::HandleCopy(HloInstruction* copy) {
Status IrEmitterUnnested::EmitExtraOutputsForReduce(
const HloInstruction* reduce, const IrArray::Index& index,
- tensorflow::gtl::ArraySlice<
- std::pair<llvm_ir::ElementGenerator, ShapeIndex>>
+ absl::Span<const std::pair<llvm_ir::ElementGenerator, ShapeIndex>>
extra_output_gens) {
for (int i = 0; i != extra_output_gens.size(); ++i) {
const HloInstruction* output = reduce->parent()->FusionInstruction();
@@ -736,12 +734,11 @@ Status IrEmitterUnnested::EmitExtraOutputsForReduce(
Status IrEmitterUnnested::EmitReductionToScalar(
HloInstruction* reduce, const Shape& input_shape,
- tensorflow::gtl::ArraySlice<llvm_ir::ElementGenerator> input_gens,
- tensorflow::gtl::ArraySlice<llvm_ir::ElementGenerator> init_value_gens,
- tensorflow::gtl::ArraySlice<HloComputation*> reducers,
- tensorflow::gtl::ArraySlice<ShapeIndex> reduce_output_shapes,
- tensorflow::gtl::ArraySlice<
- std::pair<llvm_ir::ElementGenerator, ShapeIndex>>
+ absl::Span<const llvm_ir::ElementGenerator> input_gens,
+ absl::Span<const llvm_ir::ElementGenerator> init_value_gens,
+ absl::Span<HloComputation* const> reducers,
+ absl::Span<const ShapeIndex> reduce_output_shapes,
+ absl::Span<const std::pair<llvm_ir::ElementGenerator, ShapeIndex>>
extra_output_gens) {
// Number of elements processed by a single thread.
constexpr int64 kTileSize = 16;
@@ -951,12 +948,11 @@ Status IrEmitterUnnested::EmitReductionToScalar(
Status IrEmitterUnnested::EmitColumnReduction(
int64 height, int64 width, HloInstruction* reduce, const Shape& input_shape,
- tensorflow::gtl::ArraySlice<llvm_ir::ElementGenerator> input_gens,
- tensorflow::gtl::ArraySlice<llvm_ir::ElementGenerator> init_value_gens,
- tensorflow::gtl::ArraySlice<HloComputation*> reducers,
- tensorflow::gtl::ArraySlice<ShapeIndex> reduce_output_shapes,
- tensorflow::gtl::ArraySlice<
- std::pair<llvm_ir::ElementGenerator, ShapeIndex>>
+ absl::Span<const llvm_ir::ElementGenerator> input_gens,
+ absl::Span<const llvm_ir::ElementGenerator> init_value_gens,
+ absl::Span<HloComputation* const> reducers,
+ absl::Span<const ShapeIndex> reduce_output_shapes,
+ absl::Span<const std::pair<llvm_ir::ElementGenerator, ShapeIndex>>
extra_output_gens) {
// Divide the input matrix into tiles of size KxL. For example, when the
// input matrix is 4x4, K=2, and L=1 the tiled matrix looks like
@@ -1240,12 +1236,11 @@ static std::pair<int64, int64> ComputeTilingSchemeForReduction(
Status IrEmitterUnnested::EmitRowReduction(
int64 depth, int64 height, int64 width, HloInstruction* reduce,
const Shape& input_shape,
- tensorflow::gtl::ArraySlice<llvm_ir::ElementGenerator> input_gens,
- tensorflow::gtl::ArraySlice<llvm_ir::ElementGenerator> init_value_gens,
- tensorflow::gtl::ArraySlice<HloComputation*> reducers,
- tensorflow::gtl::ArraySlice<ShapeIndex> reduce_output_shapes,
- tensorflow::gtl::ArraySlice<
- std::pair<llvm_ir::ElementGenerator, ShapeIndex>>
+ absl::Span<const llvm_ir::ElementGenerator> input_gens,
+ absl::Span<const llvm_ir::ElementGenerator> init_value_gens,
+ absl::Span<HloComputation* const> reducers,
+ absl::Span<const ShapeIndex> reduce_output_shapes,
+ absl::Span<const std::pair<llvm_ir::ElementGenerator, ShapeIndex>>
extra_output_gens) {
// A naive algorithm is:
// 1. Divide the x dimension of the input tensor into tiles of size 1x1xX.
@@ -1593,13 +1588,12 @@ Status IrEmitterUnnested::EmitRowReduction(
// elementwise.
Status IrEmitterUnnested::EmitReductionToVector(
HloInstruction* reduce, const Shape& input_shape,
- tensorflow::gtl::ArraySlice<llvm_ir::ElementGenerator> input_gens,
- tensorflow::gtl::ArraySlice<llvm_ir::ElementGenerator> init_value_gens,
- tensorflow::gtl::ArraySlice<int64> dimensions_to_reduce,
- tensorflow::gtl::ArraySlice<HloComputation*> reducers,
- tensorflow::gtl::ArraySlice<ShapeIndex> reduce_output_shapes,
- tensorflow::gtl::ArraySlice<
- std::pair<llvm_ir::ElementGenerator, ShapeIndex>>
+ absl::Span<const llvm_ir::ElementGenerator> input_gens,
+ absl::Span<const llvm_ir::ElementGenerator> init_value_gens,
+ absl::Span<const int64> dimensions_to_reduce,
+ absl::Span<HloComputation* const> reducers,
+ absl::Span<const ShapeIndex> reduce_output_shapes,
+ absl::Span<const std::pair<llvm_ir::ElementGenerator, ShapeIndex>>
extra_output_gens) {
// This emission requires "reduce" to have an input layout. It is either set
// by LayoutAssignment (for a top-level kReduce) or by InstructionFusion (for
@@ -1694,7 +1688,7 @@ Status IrEmitterUnnested::HandleReduce(HloInstruction* reduce) {
}
auto input = reduce->operand(0);
auto init_value = reduce->operand(1);
- tensorflow::gtl::ArraySlice<int64> dimensions_to_reduce(reduce->dimensions());
+ absl::Span<const int64> dimensions_to_reduce(reduce->dimensions());
HloComputation* reducer = reduce->to_apply();
// HandleReduce specializes reduction from a multi-dimensional array to a 1D
// array. The specialized version requires an initializer thunk that
@@ -2570,7 +2564,7 @@ StatusOr<std::unique_ptr<Thunk>> IrEmitterUnnested::BuildInitializerThunk(
// Are all the bytes of this scalar equal to 0? If so, we can create a
// MemzeroThunk.
- ArraySlice<uint8> literal_bytes(
+ absl::Span<const uint8> literal_bytes(
reinterpret_cast<const uint8*>(literal.untyped_data()), num_bytes);
if (absl::c_all_of(literal_bytes, [](uint8 byte) { return byte == 0; })) {
return {absl::make_unique<MemzeroThunk>(GetAllocationSlice(*hlo, index),
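An aside illustrating the zero check above: a span of bytes can be laid over any object's raw storage with the pointer-plus-length constructor, and absl::c_all_of then works on it like on any container. A minimal host-side sketch (uint8_t stands in for XLA's uint8 alias):

#include <cstddef>
#include <cstdint>
#include "absl/algorithm/container.h"
#include "absl/types/span.h"

// True if every byte of the buffer is zero, mirroring the MemzeroThunk check.
bool AllBytesZero(const void* data, std::size_t num_bytes) {
  absl::Span<const uint8_t> bytes(reinterpret_cast<const uint8_t*>(data), num_bytes);
  return absl::c_all_of(bytes, [](uint8_t byte) { return byte == 0; });
}

int main() {
  const float zeros[4] = {0.0f, 0.0f, 0.0f, 0.0f};
  const float mixed[2] = {0.0f, 1.0f};
  return (AllBytesZero(zeros, sizeof(zeros)) && !AllBytesZero(mixed, sizeof(mixed))) ? 0 : 1;
}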
@@ -2880,7 +2874,7 @@ int IrEmitterUnnested::ConstructIrArrayForInputs(
int IrEmitterUnnested::ConstructOutputReducedShapeAndCastOutputIrArrayToShape(
const HloInstruction& hlo, const std::vector<IrArray>& output_arrays,
- tensorflow::gtl::ArraySlice<int64> reduced_output_dims,
+ absl::Span<const int64> reduced_output_dims,
std::vector<Shape>* output_reduced_shapes,
std::vector<IrArray>* output_in_reduced_shape_arrays) {
int64 num_outputs = 1;
@@ -2907,7 +2901,7 @@ int IrEmitterUnnested::ConstructOutputReducedShapeAndCastOutputIrArrayToShape(
int IrEmitterUnnested::ConstructInputReducedShapeAndCastInputIrArrayToShape(
const HloInstruction& hlo, const std::vector<IrArray>& param_arrays,
const std::vector<llvm::Value*>& param_buffers,
- tensorflow::gtl::ArraySlice<int64> reduced_output_dims,
+ absl::Span<const int64> reduced_output_dims,
std::vector<Shape>* param_reduced_shapes,
std::vector<IrArray>* param_in_reduced_shape_arrays) {
int64 num_params = hlo.operands().size();
@@ -3048,8 +3042,8 @@ void EmitTiledElementalCodeWithBoundsCheck(
// TODO(b/33320379): Here each block transposes 1 tile. It may be more efficient
// to launch fewer blocks so each transposes many tiles.
LaunchDimensions IrEmitterUnnested::EmitHlo021Tile(
- HloInstruction* hlo, tensorflow::gtl::ArraySlice<int64> reduced_output_dims,
- tensorflow::gtl::ArraySlice<int64> tiled_param_ids) {
+ HloInstruction* hlo, absl::Span<const int64> reduced_output_dims,
+ absl::Span<const int64> tiled_param_ids) {
// Parameters for the tiling algorithm.
constexpr int64 kTileSize = 32;
constexpr int64 kNumRows = 4;
diff --git a/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.h b/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.h
index 5254419907..084462330e 100644
--- a/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.h
+++ b/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.h
@@ -105,13 +105,12 @@ class IrEmitterUnnested : public IrEmitter {
// This kernel takes as arguments pointers to the given buffer allocations.
llvm::Function* BuildKernelPrototype(
const HloInstruction& inst,
- tensorflow::gtl::ArraySlice<const BufferAllocation*> args);
+ absl::Span<const BufferAllocation* const> args);
// Helper for writing extra outputs from inside a reduce kernel.
Status EmitExtraOutputsForReduce(
const HloInstruction* reduce, const llvm_ir::IrArray::Index& index,
- tensorflow::gtl::ArraySlice<
- std::pair<llvm_ir::ElementGenerator, ShapeIndex>>
+ absl::Span<const std::pair<llvm_ir::ElementGenerator, ShapeIndex>>
extra_output_gens);
// EmitColumnReduction and EmitRowReduction emit code for column and row
@@ -127,12 +126,11 @@ class IrEmitterUnnested : public IrEmitter {
Status EmitColumnReduction(
int64 height, int64 width, HloInstruction* reduce,
const Shape& input_shape,
- tensorflow::gtl::ArraySlice<llvm_ir::ElementGenerator> input_gens,
- tensorflow::gtl::ArraySlice<llvm_ir::ElementGenerator> init_value_gens,
- tensorflow::gtl::ArraySlice<HloComputation*> reducers,
- tensorflow::gtl::ArraySlice<ShapeIndex> reduce_output_shapes,
- tensorflow::gtl::ArraySlice<
- std::pair<llvm_ir::ElementGenerator, ShapeIndex>>
+ absl::Span<const llvm_ir::ElementGenerator> input_gens,
+ absl::Span<const llvm_ir::ElementGenerator> init_value_gens,
+ absl::Span<HloComputation* const> reducers,
+ absl::Span<const ShapeIndex> reduce_output_shapes,
+ absl::Span<const std::pair<llvm_ir::ElementGenerator, ShapeIndex>>
extra_output_gens);
// Emits code that reduces a 3D tensor of shape [depth x height x width] to a
@@ -143,23 +141,21 @@ class IrEmitterUnnested : public IrEmitter {
Status EmitRowReduction(
int64 depth, int64 height, int64 width, HloInstruction* reduce,
const Shape& input_shape,
- tensorflow::gtl::ArraySlice<llvm_ir::ElementGenerator> input_gens,
- tensorflow::gtl::ArraySlice<llvm_ir::ElementGenerator> init_value_gens,
- tensorflow::gtl::ArraySlice<HloComputation*> reducers,
- tensorflow::gtl::ArraySlice<ShapeIndex> reduce_output_shapes,
- tensorflow::gtl::ArraySlice<
- std::pair<llvm_ir::ElementGenerator, ShapeIndex>>
+ absl::Span<const llvm_ir::ElementGenerator> input_gens,
+ absl::Span<const llvm_ir::ElementGenerator> init_value_gens,
+ absl::Span<HloComputation* const> reducers,
+ absl::Span<const ShapeIndex> reduce_output_shapes,
+ absl::Span<const std::pair<llvm_ir::ElementGenerator, ShapeIndex>>
extra_output_gens);
// Emits code that reduces a tensor of arbitrary rank to a scalar.
Status EmitReductionToScalar(
HloInstruction* reduce, const Shape& input_shape,
- tensorflow::gtl::ArraySlice<llvm_ir::ElementGenerator> input_gens,
- tensorflow::gtl::ArraySlice<llvm_ir::ElementGenerator> init_value_gens,
- tensorflow::gtl::ArraySlice<HloComputation*> reducers,
- tensorflow::gtl::ArraySlice<ShapeIndex> reduce_output_shapes,
- tensorflow::gtl::ArraySlice<
- std::pair<llvm_ir::ElementGenerator, ShapeIndex>>
+ absl::Span<const llvm_ir::ElementGenerator> input_gens,
+ absl::Span<const llvm_ir::ElementGenerator> init_value_gens,
+ absl::Span<HloComputation* const> reducers,
+ absl::Span<const ShapeIndex> reduce_output_shapes,
+ absl::Span<const std::pair<llvm_ir::ElementGenerator, ShapeIndex>>
extra_output_gens);
// Figures out whether `reduce` is a row or column reduction, and which
@@ -180,13 +176,12 @@ class IrEmitterUnnested : public IrEmitter {
// Prerequisite: `IsReductionToVector(*reduce)`
Status EmitReductionToVector(
HloInstruction* reduce, const Shape& input_shape,
- tensorflow::gtl::ArraySlice<llvm_ir::ElementGenerator> input_gens,
- tensorflow::gtl::ArraySlice<llvm_ir::ElementGenerator> init_value_gens,
- tensorflow::gtl::ArraySlice<int64> dimensions_to_reduce,
- tensorflow::gtl::ArraySlice<HloComputation*> reducers,
- tensorflow::gtl::ArraySlice<ShapeIndex> reduce_output_shapes,
- tensorflow::gtl::ArraySlice<
- std::pair<llvm_ir::ElementGenerator, ShapeIndex>>
+ absl::Span<const llvm_ir::ElementGenerator> input_gens,
+ absl::Span<const llvm_ir::ElementGenerator> init_value_gens,
+ absl::Span<const int64> dimensions_to_reduce,
+ absl::Span<HloComputation* const> reducers,
+ absl::Span<const ShapeIndex> reduce_output_shapes,
+ absl::Span<const std::pair<llvm_ir::ElementGenerator, ShapeIndex>>
extra_output_gens);
// Returns true if a 0-2-1 tiling algorithm is already used to emit the kernel
@@ -195,10 +190,9 @@ class IrEmitterUnnested : public IrEmitter {
// Emits a kernel for the hlo instruction using a 0-2-1 tiling algorithm and
// returns the launch dimensions for the kernel. This is a helper to support
// the implementation of CheckAndEmitHloWithTile021.
- LaunchDimensions EmitHlo021Tile(
- HloInstruction* hlo,
- tensorflow::gtl::ArraySlice<int64> reduced_output_dims,
- tensorflow::gtl::ArraySlice<int64> tiled_param_ids);
+ LaunchDimensions EmitHlo021Tile(HloInstruction* hlo,
+ absl::Span<const int64> reduced_output_dims,
+ absl::Span<const int64> tiled_param_ids);
// Generates the IrArray for each output of hlo and returns the number of
// outputs.
int ConstructIrArrayForOutputs(const HloInstruction& hlo,
@@ -214,7 +208,7 @@ class IrEmitterUnnested : public IrEmitter {
int ConstructOutputReducedShapeAndCastOutputIrArrayToShape(
const HloInstruction& hlo,
const std::vector<llvm_ir::IrArray>& output_arrays,
- tensorflow::gtl::ArraySlice<int64> reduced_output_dims,
+ absl::Span<const int64> reduced_output_dims,
std::vector<Shape>* output_reduced_shapes,
std::vector<llvm_ir::IrArray>* output_in_reduced_shape_arrays);
// For each input of the `hlo` instruction, checks its value in
@@ -226,7 +220,7 @@ class IrEmitterUnnested : public IrEmitter {
const HloInstruction& hlo,
const std::vector<llvm_ir::IrArray>& param_arrays,
const std::vector<llvm::Value*>& param_buffers,
- tensorflow::gtl::ArraySlice<int64> reduced_output_dims,
+ absl::Span<const int64> reduced_output_dims,
std::vector<Shape>* param_reduced_shapes,
std::vector<llvm_ir::IrArray>* param_in_reduced_shape_arrays);
diff --git a/tensorflow/compiler/xla/service/gpu/kernel_thunk.cc b/tensorflow/compiler/xla/service/gpu/kernel_thunk.cc
index 3259eaa2a2..878b0b96a1 100644
--- a/tensorflow/compiler/xla/service/gpu/kernel_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/kernel_thunk.cc
@@ -27,10 +27,10 @@ limitations under the License.
namespace xla {
namespace gpu {
-KernelThunk::KernelThunk(
- tensorflow::gtl::ArraySlice<const BufferAllocation*> args,
- const string& kernel_name, const HloInstruction* hlo_instruction,
- int unroll_factor)
+KernelThunk::KernelThunk(absl::Span<const BufferAllocation* const> args,
+ const string& kernel_name,
+ const HloInstruction* hlo_instruction,
+ int unroll_factor)
: Thunk(Kind::kKernel, hlo_instruction),
args_(args.begin(), args.end()),
kernel_name_(kernel_name),
diff --git a/tensorflow/compiler/xla/service/gpu/kernel_thunk.h b/tensorflow/compiler/xla/service/gpu/kernel_thunk.h
index d751de50ad..480f473037 100644
--- a/tensorflow/compiler/xla/service/gpu/kernel_thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/kernel_thunk.h
@@ -47,7 +47,7 @@ class KernelThunk : public Thunk {
// Constructs a thunk for the given kernel.
//
// `hlo_instruction` is as in Thunk. Other arguments are as the class members.
- KernelThunk(tensorflow::gtl::ArraySlice<const BufferAllocation*> args,
+ KernelThunk(absl::Span<const BufferAllocation* const> args,
const string& kernel_name, const HloInstruction* hlo_instruction,
int unroll_factor);
KernelThunk(const KernelThunk&) = delete;
diff --git a/tensorflow/compiler/xla/service/gpu/pad_for_tensor_cores.cc b/tensorflow/compiler/xla/service/gpu/pad_for_tensor_cores.cc
index 79f7d31816..fa84d77223 100644
--- a/tensorflow/compiler/xla/service/gpu/pad_for_tensor_cores.cc
+++ b/tensorflow/compiler/xla/service/gpu/pad_for_tensor_cores.cc
@@ -23,7 +23,6 @@ limitations under the License.
namespace xla {
namespace gpu {
-using tensorflow::gtl::ArraySlice;
// We want the input/output feature counts of an f16 conv to be multiples of 8,
// because without this cudnn can't use tensor cores on the conv.
@@ -42,7 +41,7 @@ static constexpr double kMaxBytesTouchedIncrease = 1.2;
// Pads the given dimensions in the given shape up to a multiple of
// kDesiredNumFeaturesFactor.
-static Shape PadShape(Shape s, ArraySlice<int64> dims) {
+static Shape PadShape(Shape s, absl::Span<const int64> dims) {
for (int64 dim : dims) {
int64 dim_to_pad_size = s.dimensions(dim);
int64 new_dim_to_pad_size =
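An aside on parameters like `absl::Span<const int64> dims` above: spans of const elements accept brace-enclosed lists directly, so call sites that passed `{...}` to an ArraySlice<int64> keep compiling unchanged. A small sketch with int64_t standing in for XLA's int64 alias (the doubling loop is only loosely modeled on PadShape):

#include <cstdint>
#include <vector>
#include "absl/types/span.h"

// Doubles the named dimensions of a shape.
std::vector<int64_t> GrowDims(std::vector<int64_t> shape,
                              absl::Span<const int64_t> dims) {
  for (int64_t dim : dims) {
    shape[dim] *= 2;
  }
  return shape;
}

int main() {
  std::vector<int64_t> shape = {8, 28, 28, 3};
  shape = GrowDims(shape, {0, 3});  // the braced list binds to the span, as with ArraySlice
  return (shape[0] == 16 && shape[3] == 6) ? 0 : 1;
}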
diff --git a/tensorflow/compiler/xla/service/gpu/parallel_loop_emitter.cc b/tensorflow/compiler/xla/service/gpu/parallel_loop_emitter.cc
index ca57cacb98..8154d75d23 100644
--- a/tensorflow/compiler/xla/service/gpu/parallel_loop_emitter.cc
+++ b/tensorflow/compiler/xla/service/gpu/parallel_loop_emitter.cc
@@ -40,7 +40,7 @@ ParallelLoopEmitter::ParallelLoopEmitter(
ParallelLoopEmitter::ParallelLoopEmitter(
const llvm_ir::ElementGenerator& target_element_generator,
- tensorflow::gtl::ArraySlice<llvm_ir::IrArray> target_arrays,
+ absl::Span<const llvm_ir::IrArray> target_arrays,
const LaunchDimensions& launch_dimensions, llvm::IRBuilder<>* b,
int unroll_factor)
: LoopEmitter(target_element_generator, target_arrays, b),
diff --git a/tensorflow/compiler/xla/service/gpu/parallel_loop_emitter.h b/tensorflow/compiler/xla/service/gpu/parallel_loop_emitter.h
index cc7da2e73b..f32ea1ce4c 100644
--- a/tensorflow/compiler/xla/service/gpu/parallel_loop_emitter.h
+++ b/tensorflow/compiler/xla/service/gpu/parallel_loop_emitter.h
@@ -47,11 +47,10 @@ class ParallelLoopEmitter : public llvm_ir::LoopEmitter {
//
// This is used in multi-output fusion. target_element_generator should
// produce a struct with N elements, one for each of target_arrays.
- ParallelLoopEmitter(
- const llvm_ir::ElementGenerator& target_element_generator,
- tensorflow::gtl::ArraySlice<llvm_ir::IrArray> target_arrays,
- const LaunchDimensions& launch_dimensions, llvm::IRBuilder<>* b,
- int unroll_factor = 1);
+ ParallelLoopEmitter(const llvm_ir::ElementGenerator& target_element_generator,
+ absl::Span<const llvm_ir::IrArray> target_arrays,
+ const LaunchDimensions& launch_dimensions,
+ llvm::IRBuilder<>* b, int unroll_factor = 1);
ParallelLoopEmitter(const ParallelLoopEmitter&) = delete;
ParallelLoopEmitter& operator=(const ParallelLoopEmitter&) = delete;
diff --git a/tensorflow/compiler/xla/service/gpu/tuple_thunk.h b/tensorflow/compiler/xla/service/gpu/tuple_thunk.h
index 2d5735d6c4..a3a03b53f8 100644
--- a/tensorflow/compiler/xla/service/gpu/tuple_thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/tuple_thunk.h
@@ -34,8 +34,7 @@ namespace gpu {
// issue (b/31336476).
class TupleThunk : public Thunk {
public:
- TupleThunk(tensorflow::gtl::ArraySlice<BufferAllocation::Slice>
- tuple_element_buffers,
+ TupleThunk(absl::Span<const BufferAllocation::Slice> tuple_element_buffers,
const BufferAllocation::Slice& dest_buffer,
const HloInstruction* hlo_instruction)
: Thunk(Kind::kTuple, hlo_instruction),
diff --git a/tensorflow/compiler/xla/service/hlo_buffer.h b/tensorflow/compiler/xla/service/hlo_buffer.h
index 4873463b2e..a88c87e46c 100644
--- a/tensorflow/compiler/xla/service/hlo_buffer.h
+++ b/tensorflow/compiler/xla/service/hlo_buffer.h
@@ -84,7 +84,7 @@ class HloBuffer {
return a->id() == b->id();
}
- HloBuffer(Id id, tensorflow::gtl::ArraySlice<const HloValue*> values)
+ HloBuffer(Id id, absl::Span<const HloValue* const> values)
: id_(id), values_(values.begin(), values.end()) {}
// Return the unique identifier for this HloBuffer.
diff --git a/tensorflow/compiler/xla/service/hlo_computation.cc b/tensorflow/compiler/xla/service/hlo_computation.cc
index c2d0673f49..fe7f2be888 100644
--- a/tensorflow/compiler/xla/service/hlo_computation.cc
+++ b/tensorflow/compiler/xla/service/hlo_computation.cc
@@ -558,7 +558,7 @@ HloComputation::CreateFromProto(
}
void HloComputation::FuseInstructionsInto(
- tensorflow::gtl::ArraySlice<HloInstruction*> instructions_to_fuse,
+ absl::Span<HloInstruction* const> instructions_to_fuse,
HloInstruction* fusion_instruction) {
CHECK_EQ(HloOpcode::kFusion, fusion_instruction->opcode());
HloInstruction* root = instructions_to_fuse.front();
@@ -577,7 +577,7 @@ void HloComputation::FuseInstructionsInto(
}
HloInstruction* HloComputation::CreateFusionInstruction(
- tensorflow::gtl::ArraySlice<HloInstruction*> instructions_to_fuse,
+ absl::Span<HloInstruction* const> instructions_to_fuse,
HloInstruction::FusionKind fusion_kind) {
HloInstruction* root = instructions_to_fuse.front();
HloInstruction* fusion_instruction = AddInstruction(
diff --git a/tensorflow/compiler/xla/service/hlo_computation.h b/tensorflow/compiler/xla/service/hlo_computation.h
index 59016624f7..daafb711fd 100644
--- a/tensorflow/compiler/xla/service/hlo_computation.h
+++ b/tensorflow/compiler/xla/service/hlo_computation.h
@@ -237,7 +237,7 @@ class HloComputation {
// removed if they have no uses after fusion (this is necessarily true for at
// least the root).
HloInstruction* CreateFusionInstruction(
- tensorflow::gtl::ArraySlice<HloInstruction*> instructions_to_fuse,
+ absl::Span<HloInstruction* const> instructions_to_fuse,
HloInstruction::FusionKind fusion_kind);
// Create a deep copy of the given instruction and return the instruction
@@ -385,7 +385,7 @@ class HloComputation {
//
// Pre-condition: fusion_instruction's opcode is kFusion.
void FuseInstructionsInto(
- tensorflow::gtl::ArraySlice<HloInstruction*> instructions_to_fuse,
+ absl::Span<HloInstruction* const> instructions_to_fuse,
HloInstruction* fusion_instruction);
// Internal helper for recursive copying of an instruction. Creates and
diff --git a/tensorflow/compiler/xla/service/hlo_constant_folding_test.cc b/tensorflow/compiler/xla/service/hlo_constant_folding_test.cc
index 7cd1481a8a..07cd1efc12 100644
--- a/tensorflow/compiler/xla/service/hlo_constant_folding_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_constant_folding_test.cc
@@ -105,8 +105,8 @@ TEST_F(HloConstantFoldingTest, ConvertF32ArrayToS64Array) {
TEST_F(HloConstantFoldingTest, Concatenate) {
const struct TestConfig {
int concat_dimension;
- tensorflow::gtl::ArraySlice<int64> dimensions;
- tensorflow::gtl::ArraySlice<int64> concat_sizes;
+ absl::Span<const int64> dimensions;
+ absl::Span<const int64> concat_sizes;
} test_configs[] = {
{1, {11, 0, 7, 5, 9}, {2, 5, 7, 11}},
{3, {1, 4, 17, 0, 8}, {1, 3, 9, 12}},
@@ -196,7 +196,7 @@ TEST_F(HloConstantFoldingTest, TransposeConstantFold) {
using NativeT = typename primitive_util::PrimitiveTypeToNative<F32>::type;
bool matched = true;
root->literal().EachCell<NativeT>(
- [&](tensorflow::gtl::ArraySlice<int64> indices, NativeT value) {
+ [&](absl::Span<const int64> indices, NativeT value) {
std::vector<int64> rindexes = Permute(permutation, indices);
matched = matched && (value == literal_clone->Get<NativeT>(rindexes));
});
diff --git a/tensorflow/compiler/xla/service/hlo_creation_utils.cc b/tensorflow/compiler/xla/service/hlo_creation_utils.cc
index 131846794d..19ffb465c0 100644
--- a/tensorflow/compiler/xla/service/hlo_creation_utils.cc
+++ b/tensorflow/compiler/xla/service/hlo_creation_utils.cc
@@ -24,7 +24,6 @@ limitations under the License.
namespace xla {
using absl::StrCat;
-using tensorflow::gtl::ArraySlice;
StatusOr<HloInstruction*> MakeBinaryHlo(HloOpcode opcode, HloInstruction* lhs,
HloInstruction* rhs) {
@@ -50,9 +49,9 @@ StatusOr<HloInstruction*> MakePadHlo(HloInstruction* operand,
}
StatusOr<HloInstruction*> MakeSliceHlo(HloInstruction* operand,
- ArraySlice<int64> start_indices,
- ArraySlice<int64> limit_indices,
- ArraySlice<int64> strides) {
+ absl::Span<const int64> start_indices,
+ absl::Span<const int64> limit_indices,
+ absl::Span<const int64> strides) {
HloComputation* computation = operand->parent();
TF_ASSIGN_OR_RETURN(Shape slice_shape, ShapeInference::InferSliceShape(
operand->shape(), start_indices,
@@ -74,7 +73,7 @@ StatusOr<HloInstruction*> MakeConvolveHlo(
}
StatusOr<HloInstruction*> MakeTransposeHlo(HloInstruction* operand,
- ArraySlice<int64> dimensions) {
+ absl::Span<const int64> dimensions) {
HloComputation* computation = operand->parent();
TF_ASSIGN_OR_RETURN(
Shape transpose_shape,
@@ -91,15 +90,15 @@ StatusOr<HloInstruction*> MakeReshapeHlo(const Shape& result_shape,
}
StatusOr<HloInstruction*> MakeReshapeHlo(
- ArraySlice<int64> result_shape_dim_bounds, HloInstruction* operand) {
+ absl::Span<const int64> result_shape_dim_bounds, HloInstruction* operand) {
Shape new_shape = ShapeUtil::MakeShape(operand->shape().element_type(),
result_shape_dim_bounds);
return MakeReshapeHlo(new_shape, operand);
}
-StatusOr<HloInstruction*> MakeDynamicSliceHlo(HloInstruction* operand,
- HloInstruction* start_indices,
- ArraySlice<int64> slice_sizes) {
+StatusOr<HloInstruction*> MakeDynamicSliceHlo(
+ HloInstruction* operand, HloInstruction* start_indices,
+ absl::Span<const int64> slice_sizes) {
HloComputation* computation = operand->parent();
CHECK_EQ(computation, start_indices->parent());
TF_ASSIGN_OR_RETURN(
@@ -125,8 +124,8 @@ StatusOr<HloInstruction*> MakeDynamicUpdateSliceHlo(
}
StatusOr<HloInstruction*> MakeBroadcastHlo(
- HloInstruction* operand, ArraySlice<int64> broadcast_dimensions,
- ArraySlice<int64> result_shape_bounds) {
+ HloInstruction* operand, absl::Span<const int64> broadcast_dimensions,
+ absl::Span<const int64> result_shape_bounds) {
HloComputation* computation = operand->parent();
Shape broadcast_shape = ShapeUtil::MakeShape(operand->shape().element_type(),
result_shape_bounds);
@@ -146,8 +145,8 @@ StatusOr<HloInstruction*> MakeGetTupleElementHlo(HloInstruction* operand,
HloInstruction::CreateGetTupleElement(gte_shape, operand, index));
}
-StatusOr<HloInstruction*> MakeConcatHlo(ArraySlice<HloInstruction*> operands,
- int64 dimension) {
+StatusOr<HloInstruction*> MakeConcatHlo(
+ absl::Span<HloInstruction* const> operands, int64 dimension) {
CHECK_GT(operands.size(), 0);
HloComputation* computation = operands[0]->parent();
@@ -176,9 +175,8 @@ StatusOr<HloInstruction*> MakeDotHlo(HloInstruction* lhs, HloInstruction* rhs,
HloInstruction::CreateDot(dot_shape, lhs, rhs, dim_numbers));
}
-StatusOr<HloInstruction*> MakeMapHlo(
- tensorflow::gtl::ArraySlice<HloInstruction*> operands,
- HloComputation* map_computation) {
+StatusOr<HloInstruction*> MakeMapHlo(absl::Span<HloInstruction* const> operands,
+ HloComputation* map_computation) {
CHECK(!operands.empty()) << "Map Hlo requires at least one operand.";
HloComputation* computation = operands.front()->parent();
std::vector<const Shape*> operand_shapes;
@@ -235,7 +233,7 @@ StatusOr<HloInstruction*> PrependDegenerateDims(HloInstruction* operand,
}
StatusOr<HloInstruction*> ExpandFirstDimIntoNDims(
- HloInstruction* operand, ArraySlice<int64> expanded_dims) {
+ HloInstruction* operand, absl::Span<const int64> expanded_dims) {
CHECK_GT(operand->shape().dimensions_size(), 0);
CHECK_EQ(operand->shape().dimensions(0), Product(expanded_dims));
@@ -251,8 +249,8 @@ StatusOr<HloInstruction*> ExpandFirstDimIntoNDims(
return MakeReshapeHlo(new_shape, operand);
}
-StatusOr<HloInstruction*> ElideDegenerateDims(HloInstruction* operand,
- ArraySlice<int64> dims_to_elide) {
+StatusOr<HloInstruction*> ElideDegenerateDims(
+ HloInstruction* operand, absl::Span<const int64> dims_to_elide) {
CHECK(absl::c_is_sorted(dims_to_elide));
const Shape& input_shape = operand->shape();
@@ -277,7 +275,7 @@ StatusOr<HloInstruction*> ElideDegenerateDims(HloInstruction* operand,
}
StatusOr<HloInstruction*> InsertDegenerateDims(
- HloInstruction* operand, ArraySlice<int64> dims_to_insert) {
+ HloInstruction* operand, absl::Span<const int64> dims_to_insert) {
CHECK(absl::c_is_sorted(dims_to_insert));
const Shape& operand_shape = operand->shape();
@@ -327,7 +325,7 @@ StatusOr<HloInstruction*> PadVectorWithZeros(HloInstruction* operand,
StatusOr<HloInstruction*> BroadcastZeros(
HloComputation* computation, PrimitiveType element_type,
- ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
HloInstruction* zero =
computation->AddInstruction(HloInstruction::CreateConstant(
absl::make_unique<Literal>(LiteralUtil::Zero(element_type))));
@@ -336,7 +334,7 @@ StatusOr<HloInstruction*> BroadcastZeros(
}
StatusOr<std::unique_ptr<HloComputation>> CreateComputationWithSignature(
- ArraySlice<const Shape*> domain, const Shape& range,
+ absl::Span<const Shape* const> domain, const Shape& range,
absl::string_view name) {
HloComputation::Builder b{string(name)};
int64 param_idx = 0;
diff --git a/tensorflow/compiler/xla/service/hlo_creation_utils.h b/tensorflow/compiler/xla/service/hlo_creation_utils.h
index 1bc6d09b45..a1c4b374d1 100644
--- a/tensorflow/compiler/xla/service/hlo_creation_utils.h
+++ b/tensorflow/compiler/xla/service/hlo_creation_utils.h
@@ -40,10 +40,10 @@ StatusOr<HloInstruction*> MakePadHlo(HloInstruction* operand,
// Creates a slice HLO instruction and adds it to the computation containing
// `operand`.
-StatusOr<HloInstruction*> MakeSliceHlo(
- HloInstruction* operand, tensorflow::gtl::ArraySlice<int64> start_indices,
- tensorflow::gtl::ArraySlice<int64> limit_indices,
- tensorflow::gtl::ArraySlice<int64> strides);
+StatusOr<HloInstruction*> MakeSliceHlo(HloInstruction* operand,
+ absl::Span<const int64> start_indices,
+ absl::Span<const int64> limit_indices,
+ absl::Span<const int64> strides);
// Creates a convolution HLO instruction and adds it to the computation
// containing `lhs` and `rhs` (`lhs` and `rhs` must be in the same computation).
@@ -53,8 +53,8 @@ StatusOr<HloInstruction*> MakeConvolveHlo(
// Creates a transpose HLO instruction and adds it to the computation containing
// `operand`.
-StatusOr<HloInstruction*> MakeTransposeHlo(
- HloInstruction* operand, tensorflow::gtl::ArraySlice<int64> dimensions);
+StatusOr<HloInstruction*> MakeTransposeHlo(HloInstruction* operand,
+ absl::Span<const int64> dimensions);
// Creates a reshape HLO instruction and adds it to the computation containing
// `operand`.
@@ -62,15 +62,14 @@ StatusOr<HloInstruction*> MakeReshapeHlo(const Shape& result_shape,
HloInstruction* operand);
StatusOr<HloInstruction*> MakeReshapeHlo(
- tensorflow::gtl::ArraySlice<int64> result_shape_dim_bounds,
- HloInstruction* operand);
+ absl::Span<const int64> result_shape_dim_bounds, HloInstruction* operand);
// Creates a dynamic-slice HLO instruction and adds it to the computation
// containing `operand` and `start_indices` (`operand` and `start_indices` must
// be in the same computation).
StatusOr<HloInstruction*> MakeDynamicSliceHlo(
HloInstruction* operand, HloInstruction* start_indices,
- tensorflow::gtl::ArraySlice<int64> slice_sizes);
+ absl::Span<const int64> slice_sizes);
// Creates a dynamic-update-slice HLO instruction and adds it to the computation
// containing `operand`, `update` and `start_indices` (`operand`, `update` and
@@ -82,9 +81,8 @@ StatusOr<HloInstruction*> MakeDynamicUpdateSliceHlo(
// Creates a broadcast HLO instruction and adds it to the computation containing
// `operand`.
StatusOr<HloInstruction*> MakeBroadcastHlo(
- HloInstruction* operand,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions,
- tensorflow::gtl::ArraySlice<int64> result_shape_bounds);
+ HloInstruction* operand, absl::Span<const int64> broadcast_dimensions,
+ absl::Span<const int64> result_shape_bounds);
// Creates a GetTupleElement HLO instruction and adds it to the computation
// containing `operand`.
@@ -95,7 +93,7 @@ StatusOr<HloInstruction*> MakeGetTupleElementHlo(HloInstruction* operand,
// containing `operands` (`operands` must be non-empty and every element must be
// contained in the same computation).
StatusOr<HloInstruction*> MakeConcatHlo(
- tensorflow::gtl::ArraySlice<HloInstruction*> operands, int64 dimension);
+ absl::Span<HloInstruction* const> operands, int64 dimension);
// Creates a Dot HLO instruction and adds it to the computation containing `lhs`
// and `rhs` (both must be in the same computation).
@@ -104,9 +102,8 @@ StatusOr<HloInstruction*> MakeDotHlo(HloInstruction* lhs, HloInstruction* rhs,
// Creates a Map HLO instruction and adds it to the computation containing the
// operands. All operands must be in the same computation.
-StatusOr<HloInstruction*> MakeMapHlo(
- tensorflow::gtl::ArraySlice<HloInstruction*> operands,
- HloComputation* map_computation);
+StatusOr<HloInstruction*> MakeMapHlo(absl::Span<HloInstruction* const> operands,
+ HloComputation* map_computation);
// -----------------------------------------------------------------------------
// Some other miscellaneous helpers to generate common HLO patterns. All of
@@ -138,7 +135,7 @@ StatusOr<HloInstruction*> PrependDegenerateDims(HloInstruction* operand,
// For instance if `operand` has shape f32[200,9,7] and expanded_dims is
// {2,5,20} the result is `operand` reshaped to [2,5,20,9,7].
StatusOr<HloInstruction*> ExpandFirstDimIntoNDims(
- HloInstruction* operand, tensorflow::gtl::ArraySlice<int64> expanded_dims);
+ HloInstruction* operand, absl::Span<const int64> expanded_dims);
// Elides (via reshape) a set of degenerate dimensions (dimensions containing
// exactly one element), `dims_to_elide` from `operand`. Every dimension in
@@ -148,7 +145,7 @@ StatusOr<HloInstruction*> ExpandFirstDimIntoNDims(
// For example if `operand` is of shape f32[19,1,20,1,7,1,9] and dims_to_elide
// is {1,5} then the result is `operand` reshaped to [19,20,1,7,9].
StatusOr<HloInstruction*> ElideDegenerateDims(
- HloInstruction* operand, tensorflow::gtl::ArraySlice<int64> dims_to_elide);
+ HloInstruction* operand, absl::Span<const int64> dims_to_elide);
// Inserts (via reshape) a set of degenerate dimensions (dimensions containing
// exactly one element), `dims_to_insert` into `operand`. The dimensions in
@@ -158,7 +155,7 @@ StatusOr<HloInstruction*> ElideDegenerateDims(
// For example, if `operand` is of shape f32[12,21,8,34] and dims_to_insert is
// {0, 2}, then the result is `operand` reshaped to [1,12,1,21,8,34].
StatusOr<HloInstruction*> InsertDegenerateDims(
- HloInstruction* operand, tensorflow::gtl::ArraySlice<int64> dims_to_insert);
+ HloInstruction* operand, absl::Span<const int64> dims_to_insert);
// Pads `operand` (which must have rank 1) with `zeros_to_prepend` zeros in the
// front and `zeros_to_append` zeros in the back.
@@ -171,12 +168,12 @@ StatusOr<HloInstruction*> PadVectorWithZeros(HloInstruction* operand,
// broadcast instruction is emitted into `computation`.
StatusOr<HloInstruction*> BroadcastZeros(
HloComputation* computation, PrimitiveType element_type,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ absl::Span<const int64> broadcast_dimensions);
// Creates a HLO computation that takes arguments of type `domain` and produces
// a value of type `range`.
StatusOr<std::unique_ptr<HloComputation>> CreateComputationWithSignature(
- tensorflow::gtl::ArraySlice<const Shape*> domain, const Shape& range,
+ absl::Span<const Shape* const> domain, const Shape& range,
absl::string_view name);
} // namespace xla
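The header changes above are mechanical, and call sites keep compiling because absl::Span<const int64>, like the old ArraySlice, binds implicitly to any contiguous container and (for const element types) to a braced initializer list. A minimal sketch of that conversion behavior, using a hypothetical TakeDims helper rather than the real XLA entry points, and assuming the int64 alias XLA uses:

#include <cstdint>
#include <vector>
#include "absl/types/span.h"

using int64 = std::int64_t;  // Assumed alias for the sketch, matching XLA's int64.

// Hypothetical helper with the post-migration signature style.
int64 TakeDims(absl::Span<const int64> dims) {
  int64 product = 1;
  for (int64 d : dims) product *= d;
  return product;
}

void Demo() {
  std::vector<int64> v = {2, 5, 20};
  TakeDims(v);            // std::vector converts implicitly, as with ArraySlice.
  TakeDims({19, 20, 7});  // Spans of const element type also accept braced lists.
}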
diff --git a/tensorflow/compiler/xla/service/hlo_creation_utils_test.cc b/tensorflow/compiler/xla/service/hlo_creation_utils_test.cc
index 662f008205..eb6affadc8 100644
--- a/tensorflow/compiler/xla/service/hlo_creation_utils_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_creation_utils_test.cc
@@ -24,15 +24,13 @@ limitations under the License.
namespace xla {
namespace {
-using tensorflow::gtl::ArraySlice;
class HloCreationUtilsTest : public HloVerifiedTestBase {
protected:
- HloModule* CreateModuleWithProgramShape(PrimitiveType primitive_type,
- ArraySlice<int64> input_shape_dims,
- ArraySlice<int64> output_shape_dims,
- HloInstruction** param,
- HloComputation** entry_computation) {
+ HloModule* CreateModuleWithProgramShape(
+ PrimitiveType primitive_type, absl::Span<const int64> input_shape_dims,
+ absl::Span<const int64> output_shape_dims, HloInstruction** param,
+ HloComputation** entry_computation) {
Shape input_shape = ShapeUtil::MakeShape(primitive_type, input_shape_dims);
Shape output_shape =
ShapeUtil::MakeShape(primitive_type, output_shape_dims);
diff --git a/tensorflow/compiler/xla/service/hlo_dataflow_analysis.cc b/tensorflow/compiler/xla/service/hlo_dataflow_analysis.cc
index 3376d170e6..6a63681996 100644
--- a/tensorflow/compiler/xla/service/hlo_dataflow_analysis.cc
+++ b/tensorflow/compiler/xla/service/hlo_dataflow_analysis.cc
@@ -46,8 +46,7 @@ namespace {
//
// In this case, we should be able to reuse p0 and output, although p0 has
// multiple uses.
-bool MultiDynamicSliceUseShareSameIndices(
- tensorflow::gtl::ArraySlice<HloUse> uses) {
+bool MultiDynamicSliceUseShareSameIndices(absl::Span<const HloUse> uses) {
if (uses.empty()) {
return false;
}
@@ -221,7 +220,7 @@ string HloDataflowAnalysis::ToString() const {
bool HloDataflowAnalysis::Phi(
HloInstruction* instruction,
- tensorflow::gtl::ArraySlice<const InstructionValueSet*> inputs) {
+ absl::Span<const InstructionValueSet* const> inputs) {
CHECK(ssa_form_);
VLOG(4) << "Phi(" << instruction->name() << ")";
VLOG(5) << "instruction value set = "
diff --git a/tensorflow/compiler/xla/service/hlo_dataflow_analysis.h b/tensorflow/compiler/xla/service/hlo_dataflow_analysis.h
index a1678d4943..6d5c375d6d 100644
--- a/tensorflow/compiler/xla/service/hlo_dataflow_analysis.h
+++ b/tensorflow/compiler/xla/service/hlo_dataflow_analysis.h
@@ -202,7 +202,7 @@ class HloDataflowAnalysis {
// the given instruction. If skip_top_level is true, then the top level of the
// value set of 'instruction' is not modified.
bool Phi(HloInstruction* instruction,
- tensorflow::gtl::ArraySlice<const InstructionValueSet*> inputs);
+ absl::Span<const InstructionValueSet* const> inputs);
// Updates the positions of the HloValues in the output of the given
// instruction. This should be called after the instruction value set of
diff --git a/tensorflow/compiler/xla/service/hlo_evaluator.cc b/tensorflow/compiler/xla/service/hlo_evaluator.cc
index c25869f87b..d316645a0b 100644
--- a/tensorflow/compiler/xla/service/hlo_evaluator.cc
+++ b/tensorflow/compiler/xla/service/hlo_evaluator.cc
@@ -53,7 +53,6 @@ namespace xla {
namespace {
-using tensorflow::gtl::ArraySlice;
template <typename OperandT>
StatusOr<std::unique_ptr<Literal>> Compare(const Shape& shape, HloOpcode opcode,
@@ -97,10 +96,11 @@ StatusOr<std::unique_ptr<Literal>> Compare(const Shape& shape, HloOpcode opcode,
}
auto result = absl::make_unique<Literal>(shape);
- TF_RETURN_IF_ERROR(result->Populate<bool>([&](ArraySlice<int64> multi_index) {
- return compare_op(lhs_literal.Get<OperandT>(multi_index),
- rhs_literal.Get<OperandT>(multi_index));
- }));
+ TF_RETURN_IF_ERROR(
+ result->Populate<bool>([&](absl::Span<const int64> multi_index) {
+ return compare_op(lhs_literal.Get<OperandT>(multi_index),
+ rhs_literal.Get<OperandT>(multi_index));
+ }));
return std::move(result);
}
@@ -127,10 +127,11 @@ StatusOr<std::unique_ptr<Literal>> Compare<complex64>(
}
auto result = absl::make_unique<Literal>(shape);
- TF_RETURN_IF_ERROR(result->Populate<bool>([&](ArraySlice<int64> multi_index) {
- return compare_op(lhs_literal.Get<complex64>(multi_index),
- rhs_literal.Get<complex64>(multi_index));
- }));
+ TF_RETURN_IF_ERROR(
+ result->Populate<bool>([&](absl::Span<const int64> multi_index) {
+ return compare_op(lhs_literal.Get<complex64>(multi_index),
+ rhs_literal.Get<complex64>(multi_index));
+ }));
return std::move(result);
}
@@ -194,7 +195,7 @@ HloEvaluator::HloEvaluator(int64 max_loop_iterations)
template <typename LiteralPtr>
StatusOr<std::unique_ptr<Literal>> HloEvaluator::Evaluate(
- const HloModule& module, ArraySlice<LiteralPtr> arg_literals) {
+ const HloModule& module, absl::Span<const LiteralPtr> arg_literals) {
XLA_VLOG_LINES(2, "HloEvaluator::Evaluate module:\n" + module.ToString());
evaluated_.clear();
@@ -211,7 +212,8 @@ StatusOr<std::unique_ptr<Literal>> HloEvaluator::Evaluate(
template <typename LiteralPtr>
StatusOr<std::unique_ptr<Literal>> HloEvaluator::Evaluate(
- const HloComputation& computation, ArraySlice<LiteralPtr> arg_literals) {
+ const HloComputation& computation,
+ absl::Span<const LiteralPtr> arg_literals) {
CHECK(computation.parent() != nullptr);
XLA_VLOG_LINES(
2, "HloEvaluator::Evaluate computation:\n" + computation.ToString());
@@ -228,7 +230,7 @@ StatusOr<std::unique_ptr<Literal>> HloEvaluator::Evaluate(
template <typename LiteralPtr>
StatusOr<std::unique_ptr<Literal>> HloEvaluator::Evaluate(
- HloInstruction* instruction, ArraySlice<LiteralPtr> arg_literals) {
+ HloInstruction* instruction, absl::Span<const LiteralPtr> arg_literals) {
TF_RET_CHECK(hlo_query::AllOperandsAreParametersOrConstants(*instruction));
evaluated_.clear();
@@ -390,7 +392,7 @@ Status HloEvaluator::HandleTranspose(HloInstruction* transpose) {
}
Status HloEvaluator::HandleConcatenate(HloInstruction* concatenate) {
- ArraySlice<HloInstruction*> operands(concatenate->operands());
+ absl::Span<HloInstruction* const> operands(concatenate->operands());
// The result concatenate dimension is going to be the sum of all
// concatenate dimensions of the operands taking part of the operation.
const Shape& reference_shape = operands[0]->shape();
@@ -588,7 +590,7 @@ ShapeUtil::IndexIterationSpace IterationSpaceForOutputBatchIndices(
// Return a ShapeUtil::IndexIterationSpace that iterates over the output slice
// dimensions while keeping the rest of the output dimensions clamped to 0.
ShapeUtil::IndexIterationSpace IterationSpaceForOutputOffsetIndices(
- int64 output_rank, ArraySlice<int64> slice_sizes,
+ int64 output_rank, absl::Span<const int64> slice_sizes,
const GatherDimensionNumbers& dim_numbers) {
std::vector<int64> index_base(output_rank, 0);
std::vector<int64> index_count(output_rank, 1);
@@ -661,11 +663,12 @@ class OutputBatchIndexToInputIndex {
// same storage for all invocations.
//
// This returns an arrayslice into memory owned by the class.
- StatusOr<ArraySlice<int64>> operator()(ArraySlice<int64> output_index) {
+ StatusOr<absl::Span<const int64>> operator()(
+ absl::Span<const int64> output_index) {
PropagateOutputIndexGatherDimsToIndexVectorIndex(output_index);
TF_RETURN_IF_ERROR(FetchIndexVector());
PropagateIndexVectorToInputIndex();
- return ArraySlice<int64>(input_index_);
+ return absl::Span<const int64>(input_index_);
}
private:
@@ -674,7 +677,7 @@ class OutputBatchIndexToInputIndex {
// update the dim_numbers.index_vector_dim() dimension -- that's the dimension
// we iterate over in FetchIndexVector.
void PropagateOutputIndexGatherDimsToIndexVectorIndex(
- ArraySlice<int64> output_index) {
+ absl::Span<const int64> output_index) {
int64 index_vector_index_i = 0;
for (int64 i = 0, e = output_index.size(); i < e; i++) {
if (!output_dim_is_batch_dims_[i]) {
@@ -729,7 +732,7 @@ class OutputBatchIndexToInputIndex {
// The index vector fetched from start_indices_.
std::vector<int64> index_vector_;
- // The result computed by this functor. operator() returns an ArraySlice into
+ // The result computed by this functor. operator() returns a Span into
// this vector.
std::vector<int64> input_index_;
@@ -779,9 +782,10 @@ class OutputOffsetIndexToInputIndex {
// result (input_index_), mutating it in place.
//
// This returns an arrayslice into memory owned by the class.
- StatusOr<ArraySlice<int64>> operator()(ArraySlice<int64> output_index) {
+ StatusOr<absl::Span<const int64>> operator()(
+ absl::Span<const int64> output_index) {
PropagateOutputIndexWindowDimsToInputIndex(output_index);
- return ArraySlice<int64>(input_index_);
+ return absl::Span<const int64>(input_index_);
}
// Returns for a given 'input_dim' the corresponding output dimension index,
@@ -794,7 +798,7 @@ class OutputOffsetIndexToInputIndex {
// Propagates window dimensions from the output index to input_index_ by
// mutating input_index_ in place.
void PropagateOutputIndexWindowDimsToInputIndex(
- ArraySlice<int64> output_index) {
+ absl::Span<const int64> output_index) {
for (int64 i = 0, e = input_index_.size(); i < e; i++) {
if (input_dim_value_to_output_index_[i] != -1) {
input_index_[i] = output_index[input_dim_value_to_output_index_[i]];
@@ -810,7 +814,7 @@ class OutputOffsetIndexToInputIndex {
// PropagateOutputIndexWindowDimsToInputIndex.
std::vector<int64> input_dim_value_to_output_index_;
- // The result computed by this functor. operator() returns an ArraySlice into
+ // The result computed by this functor. operator() returns a Span into
// this vector.
std::vector<int64> input_index_;
};
@@ -872,11 +876,11 @@ Status HloEvaluator::HandleGather(HloInstruction* gather) {
const Shape& operand_shape = operand.shape();
auto gather_inner_loop_body =
- [&](ArraySlice<int64> output_window_index,
- ArraySlice<int64> input_gather_index,
- ArraySlice<int64> output_gather_index) -> StatusOr<bool> {
+ [&](absl::Span<const int64> output_window_index,
+ absl::Span<const int64> input_gather_index,
+ absl::Span<const int64> output_gather_index) -> StatusOr<bool> {
TF_ASSIGN_OR_RETURN(
- ArraySlice<int64> input_window_index,
+ absl::Span<const int64> input_window_index,
output_offset_index_to_input_index(output_window_index));
for (int i = 0, e = output_index.size(); i < e; i++) {
output_index[i] = output_gather_index[i] + output_window_index[i];
@@ -909,8 +913,8 @@ Status HloEvaluator::HandleGather(HloInstruction* gather) {
};
auto gather_outer_loop_body =
- [&](ArraySlice<int64> output_gather_index) -> StatusOr<bool> {
- TF_ASSIGN_OR_RETURN(ArraySlice<int64> input_gather_index,
+ [&](absl::Span<const int64> output_gather_index) -> StatusOr<bool> {
+ TF_ASSIGN_OR_RETURN(absl::Span<const int64> input_gather_index,
output_batch_index_to_input_index(output_gather_index));
TF_RETURN_IF_ERROR(ShapeUtil::ForEachIndexWithStatus(
shape, offset_indices_iteration_space,
@@ -1170,12 +1174,11 @@ StatusOr<std::unique_ptr<Literal>> EvaluateSortInternal(
result_values.push_back(key_value.second);
}
auto result_keys_literal = absl::make_unique<Literal>(keys_literal.shape());
- result_keys_literal->PopulateR1(
- tensorflow::gtl::ArraySlice<KeyType>(result_keys));
+ result_keys_literal->PopulateR1(absl::Span<const KeyType>(result_keys));
auto result_values_literal =
absl::make_unique<Literal>(values_literal.shape());
result_values_literal->PopulateR1(
- tensorflow::gtl::ArraySlice<ValueType>(result_values));
+ absl::Span<const ValueType>(result_values));
return std::make_pair(std::move(result_keys_literal),
std::move(result_values_literal));
};
@@ -1311,26 +1314,27 @@ Status HloEvaluator::Postprocess(HloInstruction* hlo) {
// Explicit instantiation of templatized Evaluate* methods.
//
template StatusOr<std::unique_ptr<Literal>>
-HloEvaluator::Evaluate<const Literal*>(const HloModule& module,
- ArraySlice<const Literal*> arg_literals);
+HloEvaluator::Evaluate<const Literal*>(
+ const HloModule& module, absl::Span<const Literal* const> arg_literals);
template StatusOr<std::unique_ptr<Literal>>
HloEvaluator::Evaluate<std::unique_ptr<Literal>>(
- const HloModule& module, ArraySlice<std::unique_ptr<Literal>> arg_literals);
+ const HloModule& module,
+ absl::Span<const std::unique_ptr<Literal>> arg_literals);
-template StatusOr<std::unique_ptr<Literal>>
-HloEvaluator::Evaluate<const Literal*>(const HloComputation& computation,
- ArraySlice<const Literal*> arg_literals);
+template StatusOr<std::unique_ptr<Literal>> HloEvaluator::Evaluate<
+ const Literal*>(const HloComputation& computation,
+ absl::Span<const Literal* const> arg_literals);
template StatusOr<std::unique_ptr<Literal>>
HloEvaluator::Evaluate<std::unique_ptr<Literal>>(
const HloComputation& computation,
- ArraySlice<std::unique_ptr<Literal>> arg_literals);
+ absl::Span<const std::unique_ptr<Literal>> arg_literals);
template StatusOr<std::unique_ptr<Literal>>
-HloEvaluator::Evaluate<const Literal*>(HloInstruction* instruction,
- ArraySlice<const Literal*> arg_literals);
+HloEvaluator::Evaluate<const Literal*>(
+ HloInstruction* instruction, absl::Span<const Literal* const> arg_literals);
template StatusOr<std::unique_ptr<Literal>>
HloEvaluator::Evaluate<std::unique_ptr<Literal>>(
HloInstruction* instruction,
- ArraySlice<std::unique_ptr<Literal>> arg_literals);
+ absl::Span<const std::unique_ptr<Literal>> arg_literals);
} // namespace xla
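The explicit instantiations above show the one non-mechanical part of the rename: with pointer element types the const moves onto the element itself, so ArraySlice<const Literal*> becomes absl::Span<const Literal* const> (an immutable view of const pointers), while ArraySlice<std::unique_ptr<Literal>> becomes absl::Span<const std::unique_ptr<Literal>>. A minimal sketch of the same pattern with a stand-in Literal type, not the real xla::Literal:

#include <memory>
#include <vector>
#include "absl/types/span.h"

struct Literal {};  // Stand-in for xla::Literal, for illustration only.

// View of const pointers to const Literals: neither the pointers nor the
// pointees can be modified through the span.
void UseBorrowed(absl::Span<const Literal* const> args) { (void)args.size(); }

// View of const unique_ptrs: ownership stays with the caller's vector.
void UseOwned(absl::Span<const std::unique_ptr<Literal>> args) { (void)args.size(); }

void Demo() {
  Literal a, b;
  std::vector<const Literal*> borrowed = {&a, &b};
  UseBorrowed(borrowed);  // const Literal** converts to const Literal* const*.

  std::vector<std::unique_ptr<Literal>> owned;
  owned.push_back(std::make_unique<Literal>());
  UseOwned(owned);
}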
diff --git a/tensorflow/compiler/xla/service/hlo_evaluator.h b/tensorflow/compiler/xla/service/hlo_evaluator.h
index 980a7fb9fa..3feb4e626f 100644
--- a/tensorflow/compiler/xla/service/hlo_evaluator.h
+++ b/tensorflow/compiler/xla/service/hlo_evaluator.h
@@ -51,8 +51,7 @@ class HloEvaluator : public DfsHloVisitorWithDefault {
// type.
template <typename LiteralPtr>
StatusOr<std::unique_ptr<Literal>> Evaluate(
- const HloModule& module,
- tensorflow::gtl::ArraySlice<LiteralPtr> arg_literals);
+ const HloModule& module, absl::Span<const LiteralPtr> arg_literals);
// Evaluates an HLO computation and an array of pointers to literals.
// Returns the evaluated result as a literal if successful.
@@ -75,7 +74,7 @@ class HloEvaluator : public DfsHloVisitorWithDefault {
template <typename LiteralPtr>
StatusOr<std::unique_ptr<Literal>> Evaluate(
const HloComputation& computation,
- tensorflow::gtl::ArraySlice<LiteralPtr> arg_literals);
+ absl::Span<const LiteralPtr> arg_literals);
// Evaluates a single HLO instruction and an array of pointers to literals.
// Return the evaluated result as literal if successful.
@@ -87,8 +86,7 @@ class HloEvaluator : public DfsHloVisitorWithDefault {
// type.
template <typename LiteralPtr>
StatusOr<std::unique_ptr<Literal>> Evaluate(
- HloInstruction* instruction,
- tensorflow::gtl::ArraySlice<LiteralPtr> arg_literals);
+ HloInstruction* instruction, absl::Span<const LiteralPtr> arg_literals);
// Evaluates a single HLO instruction with constant operands.
// Returns the evaluated result as literal if successful.
@@ -229,8 +227,8 @@ class HloEvaluator : public DfsHloVisitorWithDefault {
}
auto result = absl::make_unique<Literal>(shape);
- TF_RETURN_IF_ERROR(result->Populate<ReturnT>(
- [&](tensorflow::gtl::ArraySlice<int64> multi_index) {
+ TF_RETURN_IF_ERROR(
+ result->Populate<ReturnT>([&](absl::Span<const int64> multi_index) {
return unary_op(operand_literal.Get<NativeT>(multi_index));
}));
return std::move(result);
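Most of the evaluator churn above is in lambdas handed to Literal::Populate, which now receive the cell's multi-index as absl::Span<const int64> instead of an ArraySlice. A reduced sketch of that callback shape using a hypothetical PopulateGrid helper, not the real Literal API:

#include <cstdint>
#include <functional>
#include <vector>
#include "absl/types/span.h"

using int64 = std::int64_t;  // Assumed alias, as in XLA.

// Hypothetical stand-in for Literal::Populate: calls `gen` once per cell of a
// 2-D grid, passing the cell's multi-index as a span.
void PopulateGrid(int64 rows, int64 cols,
                  const std::function<float(absl::Span<const int64>)>& gen,
                  std::vector<float>* out) {
  out->resize(rows * cols);
  for (int64 r = 0; r < rows; ++r) {
    for (int64 c = 0; c < cols; ++c) {
      const int64 idx[] = {r, c};
      (*out)[r * cols + c] = gen(absl::Span<const int64>(idx));
    }
  }
}

void Demo() {
  std::vector<float> data;
  // The lambda signature mirrors the evaluator's Populate callbacks above.
  PopulateGrid(2, 3,
               [](absl::Span<const int64> multi_index) {
                 return static_cast<float>(multi_index[0] + multi_index[1]);
               },
               &data);
}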
diff --git a/tensorflow/compiler/xla/service/hlo_evaluator_test.cc b/tensorflow/compiler/xla/service/hlo_evaluator_test.cc
index e3eb60a851..626daa527b 100644
--- a/tensorflow/compiler/xla/service/hlo_evaluator_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_evaluator_test.cc
@@ -60,7 +60,7 @@ class HloEvaluatorTest : public ::testing::WithParamInterface<bool>,
}
std::unique_ptr<Literal> Evaluate(
- tensorflow::gtl::ArraySlice<const Literal*> arg_literals = {}) {
+ absl::Span<const Literal* const> arg_literals = {}) {
if (use_bfloat16_) {
// In BF16 mode, we convert all F32 type to BF16 and evaluate the module.
auto type_converter = HloElementTypeConverter(F32, BF16);
@@ -344,7 +344,7 @@ TEST_P(HloEvaluatorTest, DoesReshape) {
using NativeT = typename primitive_util::PrimitiveTypeToNative<F32>::type;
result->EachCell<NativeT>(
- [&](tensorflow::gtl::ArraySlice<int64> indices, NativeT value) {
+ [&](absl::Span<const int64> indices, NativeT value) {
std::vector<int64> rindexes = Permute(permutation, indices);
EXPECT_NEAR(value, literal_clone->Get<NativeT>(rindexes), 0.031250);
});
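The test helper above also keeps its `= {}` default: an absl::Span default-constructs to an empty view, so zero-argument calls still compile after the rename. A tiny sketch under that assumption, reusing the stand-in Literal type:

#include <vector>
#include "absl/types/span.h"

struct Literal {};  // Stand-in type, for illustration only.

int CountArgs(absl::Span<const Literal* const> args = {}) {
  return static_cast<int>(args.size());
}

void Demo() {
  CountArgs();  // Uses the empty default span.
  Literal a;
  std::vector<const Literal*> one = {&a};
  CountArgs(one);  // A vector of const pointers binds directly.
}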
diff --git a/tensorflow/compiler/xla/service/hlo_evaluator_typed_visitor.h b/tensorflow/compiler/xla/service/hlo_evaluator_typed_visitor.h
index d35163ebb8..980e343035 100644
--- a/tensorflow/compiler/xla/service/hlo_evaluator_typed_visitor.h
+++ b/tensorflow/compiler/xla/service/hlo_evaluator_typed_visitor.h
@@ -97,7 +97,7 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
typename NativeT,
typename std::enable_if<!is_complex_t<NativeT>::value>::type* = nullptr>
double GetAsDouble(const Literal& literal,
- tensorflow::gtl::ArraySlice<int64> input_index) {
+ absl::Span<const int64> input_index) {
return static_cast<double>(literal.Get<NativeT>(input_index));
}
@@ -109,7 +109,7 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
typename NativeT,
typename std::enable_if<is_complex_t<NativeT>::value>::type* = nullptr>
double GetAsDouble(const Literal& literal,
- tensorflow::gtl::ArraySlice<int64> input_index) {
+ absl::Span<const int64> input_index) {
LOG(FATAL) << "Trying to get complex literal as double: "
<< literal.ToString();
}
@@ -980,8 +980,8 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
const Literal& operand_literal = parent_->GetEvaluatedLiteralFor(operand);
auto result = absl::make_unique<Literal>(result_shape);
- TF_RETURN_IF_ERROR(result->Populate<ReturnT>(
- [&](tensorflow::gtl::ArraySlice<int64> out_index) {
+ TF_RETURN_IF_ERROR(
+ result->Populate<ReturnT>([&](absl::Span<const int64> out_index) {
std::vector<int64> from_index(out_index.begin(), out_index.end());
for (const int64 dim : reverse_dimensions) {
from_index[dim] = result_shape.dimensions(dim) - 1 - out_index[dim];
@@ -1048,8 +1048,7 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
auto func = [&window_shape, &dnums, &lhs_shape, &rhs_shape, &window,
&lhs_dim_multipliers, &rhs_dim_multipliers, lhs_literal_data,
- rhs_literal_data](
- tensorflow::gtl::ArraySlice<int64> out_index) {
+ rhs_literal_data](absl::Span<const int64> out_index) {
// Dimension number applicable for input (lhs).
const int64 input_batch_dim = dnums.input_batch_dimension();
const int64 input_z_dim = dnums.input_feature_dimension();
@@ -1211,8 +1210,8 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
}
auto result = absl::make_unique<Literal>(dot->shape());
- TF_RETURN_IF_ERROR(result->Populate<ReturnT>(
- [&](tensorflow::gtl::ArraySlice<int64> result_index) {
+ TF_RETURN_IF_ERROR(
+ result->Populate<ReturnT>([&](absl::Span<const int64> result_index) {
ElementwiseT result_val = static_cast<ElementwiseT>(0);
for (int64 i = 0; i < result_index.size(); i++) {
@@ -1261,9 +1260,7 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
parent_->GetEvaluatedLiteralFor(pad->operand(1)).Get<ReturnT>({});
auto result = absl::make_unique<Literal>(pad->shape());
TF_RETURN_IF_ERROR(result->Populate<ReturnT>(
- [&scalar](tensorflow::gtl::ArraySlice<int64> multi_index) {
- return scalar;
- }));
+ [&scalar](absl::Span<const int64> multi_index) { return scalar; }));
const Literal& evaluated_operand =
parent_->GetEvaluatedLiteralFor(pad->operand(0));
@@ -1276,7 +1273,7 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
// corresponding index of the resulting padded literal.
const PaddingConfig& pad_config = pad->padding_config();
- auto func = [&](tensorflow::gtl::ArraySlice<int64> input_index) {
+ auto func = [&](absl::Span<const int64> input_index) {
for (auto i = 0; i < input_index.size(); ++i) {
// Interior padding occurs logically before edge padding, so in the case
// of negative edge padding elements are removed from the
@@ -1427,8 +1424,8 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
auto result = absl::make_unique<Literal>(map->shape());
HloEvaluator embedded_evaluator(parent_->max_loop_iterations_);
- TF_RETURN_IF_ERROR(result->Populate<ReturnT>(
- [&](tensorflow::gtl::ArraySlice<int64> multi_index) {
+ TF_RETURN_IF_ERROR(
+ result->Populate<ReturnT>([&](absl::Span<const int64> multi_index) {
std::vector<std::unique_ptr<Literal>> arg_literals;
arg_literals.reserve(operands.size());
@@ -1539,8 +1536,7 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
return SafeLess<ReturnT>(a, b);
});
auto result_literal = absl::make_unique<Literal>(keys_literal.shape());
- result_literal->PopulateR1(
- tensorflow::gtl::ArraySlice<ReturnT>(result_data));
+ result_literal->PopulateR1(absl::Span<const ReturnT>(result_data));
VLOG(3) << "HandleSort result_literal: " << result_literal->ToString();
return result_literal;
};
@@ -1582,7 +1578,7 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
HloReduceInstruction* reduce = Cast<HloReduceInstruction>(hlo);
int64 num_args = reduce->inputs().size();
bool has_tuple_output = ShapeUtil::IsTuple(reduce->shape());
- tensorflow::gtl::ArraySlice<int64> dimensions(reduce->dimensions());
+ absl::Span<const int64> dimensions(reduce->dimensions());
HloComputation* function = reduce->to_apply();
absl::InlinedVector<const Shape*, 1> operand_shapes;
@@ -1650,7 +1646,7 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
for (int64 input = 0; input < num_args; ++input) {
TF_RETURN_IF_ERROR(results[input]->Populate<ReturnT>(
- [&](tensorflow::gtl::ArraySlice<int64> multi_index) {
+ [&](absl::Span<const int64> multi_index) {
if (!eval_status.ok()) {
return init_scalars[input];
}
@@ -1668,7 +1664,7 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
IsScalarAdd(function)) {
CHECK_EQ(num_args, 1);
double computed_result = 0;
- auto func = [&](tensorflow::gtl::ArraySlice<int64> input_index) {
+ auto func = [&](absl::Span<const int64> input_index) {
computed_result +=
GetAsDouble<ReturnT>(*arg_literals[0], input_index);
return true;
@@ -1677,8 +1673,8 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
arg_dim_counts, arg_dim_steps, func);
return static_cast<ReturnT>(computed_result);
}
- auto func = [&](tensorflow::gtl::ArraySlice<int64> input_index)
- -> StatusOr<bool> {
+ auto func =
+ [&](absl::Span<const int64> input_index) -> StatusOr<bool> {
absl::InlinedVector<ReturnT, 1> arg_values(num_args);
for (int64 i = 0; i < num_args; ++i) {
arg_values[i] = arg_literals[i]->Get<ReturnT>(input_index);
@@ -1767,9 +1763,7 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
// Initialize result array with the init value.
TF_RETURN_IF_ERROR(result->Populate<ReturnT>(
- [&](tensorflow::gtl::ArraySlice<int64> output_index) {
- return init_scalar;
- }));
+ [&](absl::Span<const int64> output_index) { return init_scalar; }));
std::vector<int64> window_dimension_sizes;
for (const auto& window_dimension : window.dimensions()) {
@@ -1902,8 +1896,8 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
HloEvaluator embedded_evaluator(parent_->max_loop_iterations_);
auto result = absl::make_unique<Literal>(reduce_window->shape());
// For each resulting dimension, calculate and assign computed value.
- TF_RETURN_IF_ERROR(result->Populate<ReturnT>(
- [&](tensorflow::gtl::ArraySlice<int64> output_index) {
+ TF_RETURN_IF_ERROR(
+ result->Populate<ReturnT>([&](absl::Span<const int64> output_index) {
ReturnT result_val = init_scalar;
std::fill(window_index.begin(), window_index.end(), 0);
@@ -2049,12 +2043,12 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
// same storage for all invocations.
//
// This returns an arrayslice into memory owned by the class.
- StatusOr<tensorflow::gtl::ArraySlice<int64>> operator()(
- tensorflow::gtl::ArraySlice<int64> update_index) {
+ StatusOr<absl::Span<const int64>> operator()(
+ absl::Span<const int64> update_index) {
PropagateUpdateIndexScatterDimsToIndexVectorIndex(update_index);
TF_RETURN_IF_ERROR(FetchIndexVector());
PropagateIndexVectorToInputIndex();
- return tensorflow::gtl::ArraySlice<int64>(input_index_);
+ return absl::Span<const int64>(input_index_);
}
private:
@@ -2063,7 +2057,7 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
// update the dim_numbers.index_vector_dim() dimension -- that's the
// dimension we iterate over in FetchIndexVector.
void PropagateUpdateIndexScatterDimsToIndexVectorIndex(
- tensorflow::gtl::ArraySlice<int64> update_index) {
+ absl::Span<const int64> update_index) {
int64 index_vector_index_i = 0;
for (int64 i = 0, e = update_index.size(); i < e; i++) {
if (!update_dim_is_scatter_dims_[i]) {
@@ -2118,7 +2112,7 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
// The index vector fetched from scatter_indices_.
std::vector<int64> index_vector_;
- // The result computed by this functor. operator() returns an ArraySlice
+ // The result computed by this functor. operator() returns a Span
// into this vector.
std::vector<int64> input_index_;
@@ -2172,10 +2166,10 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
// result (input_index_), mutating it in place.
//
// This returns an arrayslice into memory owned by the class.
- StatusOr<tensorflow::gtl::ArraySlice<int64>> operator()(
- tensorflow::gtl::ArraySlice<int64> update_index) {
+ StatusOr<absl::Span<const int64>> operator()(
+ absl::Span<const int64> update_index) {
PropagateUpdateIndexWindowDimsToInputIndex(update_index);
- return tensorflow::gtl::ArraySlice<int64>(input_index_);
+ return absl::Span<const int64>(input_index_);
}
// Returns for a given 'input_dim' the corresponding update dimension index,
@@ -2188,7 +2182,7 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
// Propagates window dimensions from the update index to input_index_ by
// mutating input_index_ in place.
void PropagateUpdateIndexWindowDimsToInputIndex(
- tensorflow::gtl::ArraySlice<int64> update_index) {
+ absl::Span<const int64> update_index) {
for (int64 i = 0, e = input_index_.size(); i < e; i++) {
if (input_dim_value_to_update_index_[i] != -1) {
input_index_[i] = update_index[input_dim_value_to_update_index_[i]];
@@ -2204,7 +2198,7 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
// PropagateUpdateIndexWindowDimsToInputIndex.
std::vector<int64> input_dim_value_to_update_index_;
- // The result computed by this functor. operator() returns an ArraySlice
+ // The result computed by this functor. operator() returns a Span
// into this vector.
std::vector<int64> input_index_;
};
@@ -2247,12 +2241,11 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
std::unique_ptr<Literal> result = operand.CloneToUnique();
HloEvaluator embedded_evaluator;
auto scatter_inner_loop_body =
- [&](tensorflow::gtl::ArraySlice<int64> update_window_index,
- tensorflow::gtl::ArraySlice<int64> input_scatter_index,
- tensorflow::gtl::ArraySlice<int64> update_scatter_index)
- -> StatusOr<bool> {
+ [&](absl::Span<const int64> update_window_index,
+ absl::Span<const int64> input_scatter_index,
+ absl::Span<const int64> update_scatter_index) -> StatusOr<bool> {
TF_ASSIGN_OR_RETURN(
- tensorflow::gtl::ArraySlice<int64> input_window_index,
+ absl::Span<const int64> input_window_index,
update_window_index_to_input_index(update_window_index));
for (int i = 0, e = update_index.size(); i < e; i++) {
update_index[i] = update_scatter_index[i] + update_window_index[i];
@@ -2301,14 +2294,13 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
};
auto scatter_outer_loop_body =
- [&](tensorflow::gtl::ArraySlice<int64> update_scatter_index)
- -> StatusOr<bool> {
+ [&](absl::Span<const int64> update_scatter_index) -> StatusOr<bool> {
TF_ASSIGN_OR_RETURN(
- tensorflow::gtl::ArraySlice<int64> input_scatter_index,
+ absl::Span<const int64> input_scatter_index,
update_scatter_index_to_input_index(update_scatter_index));
TF_RETURN_IF_ERROR(ShapeUtil::ForEachIndexWithStatus(
updates_shape, window_indices_iteration_space,
- [&](tensorflow::gtl::ArraySlice<int64> update_window_index) {
+ [&](absl::Span<const int64> update_window_index) {
return scatter_inner_loop_body(
update_window_index, input_scatter_index, update_scatter_index);
}));
@@ -2336,7 +2328,7 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
const int64 rank = ShapeUtil::Rank(operand->shape());
const Literal& operand_literal = parent_->GetEvaluatedLiteralFor(operand);
- auto func = [&](tensorflow::gtl::ArraySlice<int64> out_index) {
+ auto func = [&](absl::Span<const int64> out_index) {
DimensionVector operand_index(rank);
for (int64 i = 0; i < rank; ++i) {
operand_index[i] =
@@ -2607,7 +2599,7 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
// bound, call `f` with the base index.
static void IterateThroughWindow(
const Shape& window_shape, const Window& window, const Shape& base_shape,
- const tensorflow::gtl::ArraySlice<int64>& window_count_index,
+ const absl::Span<const int64>& window_count_index,
const std::function<void(const std::vector<int64>&)>& f) {
const int64 rank = ShapeUtil::Rank(base_shape);
DimensionVector window_index(rank);
@@ -2647,8 +2639,8 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
std::vector<int64> operand_indices(start.size());
auto result = absl::make_unique<Literal>(result_shape);
- TF_RETURN_IF_ERROR(result->Populate<ReturnT>(
- [&](tensorflow::gtl::ArraySlice<int64> multi_index) {
+ TF_RETURN_IF_ERROR(
+ result->Populate<ReturnT>([&](absl::Span<const int64> multi_index) {
for (int64 i = 0; i < operand_indices.size(); ++i) {
CHECK_GE(multi_index[i] + start[i], 0);
operand_indices[i] = multi_index[i] + start[i];
@@ -2679,7 +2671,7 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
}
std::vector<int64> result_index(rank, 0);
- auto func = [&](tensorflow::gtl::ArraySlice<int64> update_index) {
+ auto func = [&](absl::Span<const int64> update_index) {
std::transform(update_index.begin(), update_index.end(), start.begin(),
result_index.begin(), std::plus<int64>());
result->Set<ReturnT>(result_index,
@@ -2733,8 +2725,8 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
auto result = absl::make_unique<Literal>(shape);
- TF_RETURN_IF_ERROR(result->Populate<ReturnT>(
- [&](tensorflow::gtl::ArraySlice<int64> multi_index) {
+ TF_RETURN_IF_ERROR(
+ result->Populate<ReturnT>([&](absl::Span<const int64> multi_index) {
return ConvertBinaryFunction(binary_op)(
lhs_literal.Get<ReturnT>(multi_index),
rhs_literal.Get<ReturnT>(multi_index));
@@ -2770,8 +2762,8 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
auto result = absl::make_unique<Literal>(shape);
- TF_RETURN_IF_ERROR(result->Populate<ReturnT>(
- [&](tensorflow::gtl::ArraySlice<int64> multi_index) {
+ TF_RETURN_IF_ERROR(
+ result->Populate<ReturnT>([&](absl::Span<const int64> multi_index) {
return ternary_op(lhs_literal.Get<LhsType>(multi_index),
rhs_literal.Get<RhsType>(multi_index),
ehs_literal.Get<EhsType>(multi_index));
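The index-mapping functors above (both the gather and scatter variants) return a Span that aliases the functor's own input_index_ vector, exactly as the old ArraySlice did; the view is only valid until the next invocation. A minimal sketch of that pattern and its lifetime caveat, with a simplified functor rather than the real OutputBatchIndexToInputIndex:

#include <cstdint>
#include <vector>
#include "absl/types/span.h"

using int64 = std::int64_t;  // Assumed alias, as in XLA.

// Simplified index-mapping functor: reuses one buffer across calls and hands
// back a non-owning view of it, mirroring the evaluator classes above.
class OffsetBy {
 public:
  explicit OffsetBy(std::vector<int64> deltas) : deltas_(std::move(deltas)) {
    scratch_.resize(deltas_.size());
  }

  absl::Span<const int64> operator()(absl::Span<const int64> index) {
    for (size_t i = 0; i < scratch_.size(); ++i) {
      scratch_[i] = index[i] + deltas_[i];
    }
    return scratch_;  // View into member storage; stale after the next call.
  }

 private:
  std::vector<int64> deltas_;
  std::vector<int64> scratch_;
};

void Demo() {
  OffsetBy shift({10, 100});
  const int64 in[] = {1, 2};
  absl::Span<const int64> out = shift(in);  // Views {11, 102}.
  // `out` must be consumed (or copied) before shift() is invoked again.
  std::vector<int64> copy(out.begin(), out.end());
}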
diff --git a/tensorflow/compiler/xla/service/hlo_instruction.cc b/tensorflow/compiler/xla/service/hlo_instruction.cc
index b747a4ea5f..bd0b6af10d 100644
--- a/tensorflow/compiler/xla/service/hlo_instruction.cc
+++ b/tensorflow/compiler/xla/service/hlo_instruction.cc
@@ -113,7 +113,7 @@ StatusOr<std::unique_ptr<HloInstruction>> HloInstruction::CreateFromProto(
std::vector<int64> fft_length(proto.fft_length().begin(),
proto.fft_length().end());
instruction = CreateFft(proto.shape(), operands(0), proto.fft_type(),
- tensorflow::gtl::ArraySlice<int64>(fft_length));
+ absl::Span<const int64>(fft_length));
break;
}
case HloOpcode::kSend:
@@ -519,13 +519,13 @@ HloInstruction::CreateGetTupleElement(const Shape& shape,
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateRng(
const Shape& shape, RandomDistribution distribution,
- tensorflow::gtl::ArraySlice<HloInstruction*> parameters) {
+ absl::Span<HloInstruction* const> parameters) {
return absl::make_unique<HloRngInstruction>(shape, distribution, parameters);
}
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateNary(
const Shape& shape, HloOpcode opcode,
- tensorflow::gtl::ArraySlice<HloInstruction*> operands) {
+ absl::Span<HloInstruction* const> operands) {
if (opcode == HloOpcode::kCopy) {
// It is impossible to copy an opaque shape, we don't know how big it is.
CHECK(!ShapeUtil::IsOpaque(shape));
@@ -627,13 +627,13 @@ HloInstruction::CreateGetTupleElement(const Shape& shape,
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateVariadic(
const Shape& shape, HloOpcode opcode,
- tensorflow::gtl::ArraySlice<HloInstruction*> operands) {
+ absl::Span<HloInstruction* const> operands) {
CHECK_EQ(HloOpcode::kTuple, opcode);
return CreateNary(shape, opcode, operands);
}
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateMap(
- const Shape& shape, tensorflow::gtl::ArraySlice<HloInstruction*> operands,
+ const Shape& shape, absl::Span<HloInstruction* const> operands,
HloComputation* map_computation) {
return absl::make_unique<HloMapInstruction>(shape, operands, map_computation);
}
@@ -648,7 +648,7 @@ HloInstruction::CreateGetTupleElement(const Shape& shape,
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateFft(
const Shape& shape, HloInstruction* operand, FftType fft_type,
- tensorflow::gtl::ArraySlice<int64> fft_length) {
+ absl::Span<const int64> fft_length) {
return absl::make_unique<HloFftInstruction>(shape, operand, fft_type,
fft_length);
}
@@ -692,7 +692,7 @@ HloInstruction::CreateReducePrecision(const Shape& shape,
/* static */ std::unique_ptr<HloInstruction>
HloInstruction::CreateCrossReplicaSum(
- const Shape& shape, tensorflow::gtl::ArraySlice<HloInstruction*> operands,
+ const Shape& shape, absl::Span<HloInstruction* const> operands,
HloComputation* reduce_computation,
const std::vector<ReplicaGroup>& replica_groups, absl::string_view barrier,
const absl::optional<int64>& all_reduce_id) {
@@ -702,7 +702,7 @@ HloInstruction::CreateCrossReplicaSum(
}
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateAllToAll(
- const Shape& shape, tensorflow::gtl::ArraySlice<HloInstruction*> operands,
+ const Shape& shape, absl::Span<HloInstruction* const> operands,
const std::vector<ReplicaGroup>& replica_groups) {
return absl::make_unique<HloAllToAllInstruction>(shape, operands,
replica_groups);
@@ -764,12 +764,12 @@ HloInstruction::CreateCollectivePermute(
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateReverse(
const Shape& shape, HloInstruction* operand,
- tensorflow::gtl::ArraySlice<int64> dimensions) {
+ absl::Span<const int64> dimensions) {
return absl::make_unique<HloReverseInstruction>(shape, operand, dimensions);
}
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateAfterAll(
- tensorflow::gtl::ArraySlice<HloInstruction*> operands) {
+ absl::Span<HloInstruction* const> operands) {
CHECK(!operands.empty());
auto instruction = absl::WrapUnique(
new HloInstruction(HloOpcode::kAfterAll, ShapeUtil::MakeTokenShape()));
@@ -815,16 +815,15 @@ HloInstruction::CreateCollectivePermute(
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateSlice(
const Shape& shape, HloInstruction* operand,
- tensorflow::gtl::ArraySlice<int64> start_indices,
- tensorflow::gtl::ArraySlice<int64> limit_indices,
- tensorflow::gtl::ArraySlice<int64> strides) {
+ absl::Span<const int64> start_indices,
+ absl::Span<const int64> limit_indices, absl::Span<const int64> strides) {
return absl::make_unique<HloSliceInstruction>(shape, operand, start_indices,
limit_indices, strides);
}
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateDynamicSlice(
const Shape& shape, HloInstruction* operand, HloInstruction* start_indices,
- tensorflow::gtl::ArraySlice<int64> slice_sizes) {
+ absl::Span<const int64> slice_sizes) {
return absl::make_unique<HloDynamicSliceInstruction>(
shape, operand, start_indices, slice_sizes);
}
@@ -843,7 +842,7 @@ HloInstruction::CreateDynamicUpdateSlice(const Shape& shape,
}
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateConcatenate(
- const Shape& shape, tensorflow::gtl::ArraySlice<HloInstruction*> operands,
+ const Shape& shape, absl::Span<HloInstruction* const> operands,
int64 dimension) {
return absl::make_unique<HloConcatenateInstruction>(shape, operands,
dimension);
@@ -868,7 +867,7 @@ HloInstruction::CreateBitcastConvert(const Shape& shape,
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateReduce(
const Shape& shape, HloInstruction* operand, HloInstruction* init_value,
- tensorflow::gtl::ArraySlice<int64> dimensions_to_reduce,
+ absl::Span<const int64> dimensions_to_reduce,
HloComputation* reduce_computation) {
auto instruction = absl::WrapUnique(new HloReduceInstruction(
shape, {operand, init_value}, dimensions_to_reduce, reduce_computation));
@@ -876,9 +875,9 @@ HloInstruction::CreateBitcastConvert(const Shape& shape,
}
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateReduce(
- const Shape& shape, tensorflow::gtl::ArraySlice<HloInstruction*> operands,
- tensorflow::gtl::ArraySlice<HloInstruction*> init_values,
- tensorflow::gtl::ArraySlice<int64> dimensions_to_reduce,
+ const Shape& shape, absl::Span<HloInstruction* const> operands,
+ absl::Span<HloInstruction* const> init_values,
+ absl::Span<const int64> dimensions_to_reduce,
HloComputation* reduce_computation) {
std::vector<HloInstruction*> all_args;
all_args.reserve(operands.size() * 2);
@@ -936,7 +935,7 @@ HloInstruction::CreateSelectAndScatter(
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateBroadcast(
const Shape& shape, HloInstruction* operand,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
return absl::make_unique<HloBroadcastInstruction>(shape, operand,
broadcast_dimensions);
}
@@ -1014,7 +1013,7 @@ HloInstruction::CreateBroadcastSequence(
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateTranspose(
const Shape& shape, HloInstruction* operand,
- tensorflow::gtl::ArraySlice<int64> dimensions) {
+ absl::Span<const int64> dimensions) {
return absl::make_unique<HloTransposeInstruction>(shape, operand, dimensions);
}
@@ -1032,7 +1031,7 @@ HloInstruction::CreateBroadcastSequence(
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateFusion(
const Shape& shape, FusionKind fusion_kind,
- tensorflow::gtl::ArraySlice<HloInstruction*> operands,
+ absl::Span<HloInstruction* const> operands,
HloComputation* fusion_computation) {
return absl::make_unique<HloFusionInstruction>(shape, fusion_kind, operands,
fusion_computation);
@@ -1090,7 +1089,7 @@ bool HloInstruction::HasSideEffect() const {
}
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateCall(
- const Shape& shape, tensorflow::gtl::ArraySlice<HloInstruction*> operands,
+ const Shape& shape, absl::Span<HloInstruction* const> operands,
HloComputation* computation) {
std::unique_ptr<HloInstruction> instruction =
absl::WrapUnique(new HloInstruction(HloOpcode::kCall, shape));
@@ -1102,14 +1101,14 @@ bool HloInstruction::HasSideEffect() const {
}
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateCustomCall(
- const Shape& shape, tensorflow::gtl::ArraySlice<HloInstruction*> operands,
+ const Shape& shape, absl::Span<HloInstruction* const> operands,
absl::string_view custom_call_target) {
return absl::make_unique<HloCustomCallInstruction>(shape, operands,
custom_call_target);
}
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateTuple(
- tensorflow::gtl::ArraySlice<HloInstruction*> elements) {
+ absl::Span<HloInstruction* const> elements) {
std::vector<Shape> element_shapes;
for (auto element : elements) {
element_shapes.push_back(element->shape());
@@ -1121,7 +1120,7 @@ bool HloInstruction::HasSideEffect() const {
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateGather(
const Shape& shape, HloInstruction* operand, HloInstruction* start_indices,
const GatherDimensionNumbers& gather_dim_numbers,
- tensorflow::gtl::ArraySlice<int64> slice_sizes) {
+ absl::Span<const int64> slice_sizes) {
return absl::make_unique<HloGatherInstruction>(
shape, operand, start_indices, gather_dim_numbers, slice_sizes);
}
@@ -1149,8 +1148,7 @@ bool HloInstruction::HasSideEffect() const {
}
std::unique_ptr<HloInstruction> HloInstruction::CloneWithNewOperands(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
VLOG(3) << "CloneWithNewOperands:\n " << ToString();
VLOG(3) << " new operands:";
@@ -1501,7 +1499,7 @@ void HloInstruction::AppendOperand(HloInstruction* operand) {
}
void HloInstruction::RemoveOperandsAtAscendingIndices(
- tensorflow::gtl::ArraySlice<int> ascending_indices) {
+ absl::Span<const int> ascending_indices) {
if (ascending_indices.empty()) {
return;
}
@@ -1997,7 +1995,7 @@ string HloInstruction::OperandsToStringWithCanonicalNameMap(
const HloPrintOptions& options,
CanonicalNameMap* canonical_name_map) const {
string operands;
- tensorflow::gtl::ArraySlice<HloInstruction*> slice(operands_);
+ absl::Span<HloInstruction* const> slice(operands_);
const int64 kMaxOperandsToShowIfCompact = 4;
if (options.compact_operands() &&
slice.size() > kMaxOperandsToShowIfCompact) {
@@ -3310,7 +3308,7 @@ const GatherDimensionNumbers& HloInstruction::gather_dimension_numbers() const {
return Cast<HloGatherInstruction>(this)->gather_dimension_numbers();
}
-tensorflow::gtl::ArraySlice<int64> HloInstruction::gather_slice_sizes() const {
+absl::Span<const int64> HloInstruction::gather_slice_sizes() const {
return Cast<HloGatherInstruction>(this)->gather_slice_sizes();
}
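For operand lists the new element type is HloInstruction* const: the span cannot be resized or repointed, but the pointed-to instructions stay mutable, which is what code like OperandsToStringWithCanonicalNameMap relies on when it views operands_ above. A small sketch of that distinction with a stand-in node type, not the real HloInstruction:

#include <vector>
#include "absl/types/span.h"

struct Node { int id = 0; };  // Stand-in for HloInstruction, for illustration.

// Span<Node* const>: the caller's pointer list is read-only through the view,
// but the pointed-to nodes may still be modified.
void RenumberAll(absl::Span<Node* const> nodes) {
  int next = 0;
  for (Node* n : nodes) {
    n->id = next++;  // OK: pointees are non-const.
    // nodes[0] = nullptr;  // Would not compile: the pointers are const.
  }
}

void Demo() {
  Node a, b;
  std::vector<Node*> operands = {&a, &b};
  RenumberAll(operands);  // std::vector<Node*> binds to Span<Node* const>.
}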
diff --git a/tensorflow/compiler/xla/service/hlo_instruction.h b/tensorflow/compiler/xla/service/hlo_instruction.h
index f3fd287d88..88cb5d8acf 100644
--- a/tensorflow/compiler/xla/service/hlo_instruction.h
+++ b/tensorflow/compiler/xla/service/hlo_instruction.h
@@ -365,7 +365,7 @@ class HloInstruction {
// random numbers from a given distribution.
static std::unique_ptr<HloInstruction> CreateRng(
const Shape& shape, RandomDistribution distribution,
- tensorflow::gtl::ArraySlice<HloInstruction*> parameters);
+ absl::Span<HloInstruction* const> parameters);
// Creates a unary instruction (one operand).
// Precondition: opcode must be a legitimate unary operation.
@@ -392,13 +392,13 @@ class HloInstruction {
// Precondition: opcode must be a legitimate variadic operation.
static std::unique_ptr<HloInstruction> CreateVariadic(
const Shape& shape, HloOpcode opcode,
- tensorflow::gtl::ArraySlice<HloInstruction*> operands);
+ absl::Span<HloInstruction* const> operands);
// Creates a map instruction, where the computation (given by the handle) is
// applied element-wise to every element in operands (across the operands,
// at a given index)
static std::unique_ptr<HloInstruction> CreateMap(
- const Shape& shape, tensorflow::gtl::ArraySlice<HloInstruction*> operands,
+ const Shape& shape, absl::Span<HloInstruction* const> operands,
HloComputation* map_computation);
// Creates a convolution op, where rhs is the convolutional filter
@@ -412,7 +412,7 @@ class HloInstruction {
// Creates an FFT op, of the type indicated by fft_type.
static std::unique_ptr<HloInstruction> CreateFft(
const Shape& shape, HloInstruction* operand, FftType fft_type,
- tensorflow::gtl::ArraySlice<int64> fft_length);
+ absl::Span<const int64> fft_length);
// Creates a dot op with operands 'lhs' and 'rhs' with contracting and batch
// dimensions specified in 'dimension_numbers'.
@@ -449,7 +449,7 @@ class HloInstruction {
//
// TODO(b/79737069): Rename this to AllReduce.
static std::unique_ptr<HloInstruction> CreateCrossReplicaSum(
- const Shape& shape, tensorflow::gtl::ArraySlice<HloInstruction*> operands,
+ const Shape& shape, absl::Span<HloInstruction* const> operands,
HloComputation* reduce_computation,
const std::vector<ReplicaGroup>& replica_groups,
absl::string_view barrier, const absl::optional<int64>& all_reduce_id);
@@ -468,7 +468,7 @@ class HloInstruction {
// be concatenated in the order of 1, 2, 3; another Alltoall will be applied
// within replica 4, 5, 0, and the concatenation order is 4, 5, 0.
static std::unique_ptr<HloInstruction> CreateAllToAll(
- const Shape& shape, tensorflow::gtl::ArraySlice<HloInstruction*> operands,
+ const Shape& shape, absl::Span<HloInstruction* const> operands,
const std::vector<ReplicaGroup>& replica_groups);
// Creates a communication instruction that permutes data across replicas.
@@ -536,17 +536,15 @@ class HloInstruction {
// start/limit indices.
static std::unique_ptr<HloInstruction> CreateSlice(
const Shape& shape, HloInstruction* operand,
- tensorflow::gtl::ArraySlice<int64> start_indices,
- tensorflow::gtl::ArraySlice<int64> limit_indices,
- tensorflow::gtl::ArraySlice<int64> strides);
+ absl::Span<const int64> start_indices,
+ absl::Span<const int64> limit_indices, absl::Span<const int64> strides);
// Creates a slice instruction, where the first operand is sliced by
// start indices specified in the second operand, and by size specified in
// 'slice_sizes'.
static std::unique_ptr<HloInstruction> CreateDynamicSlice(
const Shape& shape, HloInstruction* operand,
- HloInstruction* start_indices,
- tensorflow::gtl::ArraySlice<int64> slice_sizes);
+ HloInstruction* start_indices, absl::Span<const int64> slice_sizes);
// Creates a dynamic update slice instruction, which updates a slice
// of 'operand' with 'update' and 'start_indices'.
@@ -557,7 +555,7 @@ class HloInstruction {
// Creates a concatenate instruction, where the operands are concatenated on
// the provided dimension.
static std::unique_ptr<HloInstruction> CreateConcatenate(
- const Shape& shape, tensorflow::gtl::ArraySlice<HloInstruction*> operands,
+ const Shape& shape, absl::Span<HloInstruction* const> operands,
int64 dimension);
// Creates a reduce instruction, where the computation (given by the handle)
@@ -569,7 +567,7 @@ class HloInstruction {
// f(f(init, value0), value1), ...)
static std::unique_ptr<HloInstruction> CreateReduce(
const Shape& shape, HloInstruction* operand, HloInstruction* init_value,
- tensorflow::gtl::ArraySlice<int64> dimensions_to_reduce,
+ absl::Span<const int64> dimensions_to_reduce,
HloComputation* reduce_computation);
// A more general, multiple-argument version of the above.
@@ -584,9 +582,9 @@ class HloInstruction {
// ...
// TODO(b/112040122): Add support to this in HLO passes and in backends.
static std::unique_ptr<HloInstruction> CreateReduce(
- const Shape& shape, tensorflow::gtl::ArraySlice<HloInstruction*> operands,
- tensorflow::gtl::ArraySlice<HloInstruction*> init_values,
- tensorflow::gtl::ArraySlice<int64> dimensions_to_reduce,
+ const Shape& shape, absl::Span<HloInstruction* const> operands,
+ absl::Span<HloInstruction* const> init_values,
+ absl::Span<const int64> dimensions_to_reduce,
HloComputation* reduce_computation);
// Creates a reduce-window instruction, where the computation (given
@@ -623,7 +621,7 @@ class HloInstruction {
// Creates a broadcast instruction.
static std::unique_ptr<HloInstruction> CreateBroadcast(
const Shape& shape, HloInstruction* operand,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ absl::Span<const int64> broadcast_dimensions);
// Creates a sequence of instructions that performs an explicit broadcast of
// the operand to the target shape.
@@ -653,7 +651,7 @@ class HloInstruction {
// Creates a transpose instruction which permutes the operand dimensions.
static std::unique_ptr<HloInstruction> CreateTranspose(
const Shape& shape, HloInstruction* operand,
- tensorflow::gtl::ArraySlice<int64> dimensions);
+ absl::Span<const int64> dimensions);
// Creates a sort op, with a keys operand, and an optional values operand.
static std::unique_ptr<HloInstruction> CreateSort(
@@ -679,7 +677,7 @@ class HloInstruction {
const Shape& shape, HloInstruction* operand,
HloInstruction* start_indices,
const GatherDimensionNumbers& gather_dim_numbers,
- tensorflow::gtl::ArraySlice<int64> slice_sizes);
+ absl::Span<const int64> slice_sizes);
static std::unique_ptr<HloInstruction> CreateScatter(
const Shape& shape, HloInstruction* operand,
@@ -703,37 +701,37 @@ class HloInstruction {
static std::unique_ptr<HloInstruction> CreateFusion(
const Shape& shape, FusionKind fusion_kind,
- tensorflow::gtl::ArraySlice<HloInstruction*> operands,
+ absl::Span<HloInstruction* const> operands,
HloComputation* fusion_computation);
// Creates a call instruction that applies the given computation on the given
// operands. "shape" is the resultant shape.
static std::unique_ptr<HloInstruction> CreateCall(
- const Shape& shape, tensorflow::gtl::ArraySlice<HloInstruction*> operands,
+ const Shape& shape, absl::Span<HloInstruction* const> operands,
HloComputation* computation);
// Creates a custom call instruction that applies the given custom call target
// to the given operands. "shape" is the resultant shape.
static std::unique_ptr<HloInstruction> CreateCustomCall(
- const Shape& shape, tensorflow::gtl::ArraySlice<HloInstruction*> operands,
+ const Shape& shape, absl::Span<HloInstruction* const> operands,
absl::string_view custom_call_target);
// Creates a tuple instruction with the given elements. This is a convenience
// wrapper around CreateVariadic.
static std::unique_ptr<HloInstruction> CreateTuple(
- tensorflow::gtl::ArraySlice<HloInstruction*> elements);
+ absl::Span<HloInstruction* const> elements);
// Creates a reverse instruction, which reverses the order of the elements
// in the specified dimensions.
static std::unique_ptr<HloInstruction> CreateReverse(
const Shape& shape, HloInstruction* operand,
- tensorflow::gtl::ArraySlice<int64> dimensions);
+ absl::Span<const int64> dimensions);
// Creates an AfterAll instruction used for joining or creating new values of
// token type which thread through side-effecting operations. Operands must
// all be tokens, and there must be at least one operand.
static std::unique_ptr<HloInstruction> CreateAfterAll(
- tensorflow::gtl::ArraySlice<HloInstruction*> operands);
+ absl::Span<HloInstruction* const> operands);
// Creates an AfterAll instruction which creates a token type out of thin air
// (no operands). This is a separate method from CreateAfterAll to facilitate
@@ -1124,8 +1122,7 @@ class HloInstruction {
// Clones the HLO instruction as above but with new shape and operands.
std::unique_ptr<HloInstruction> CloneWithNewOperands(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context = nullptr) const;
// Returns the computations this instruction directly calls (if any).
@@ -1505,7 +1502,7 @@ class HloInstruction {
// Delegates to HloGatherInstruction::gather_dimension_numbers.
const GatherDimensionNumbers& gather_dimension_numbers() const;
// Delegates to HloGatherInstruction::gather_slice_sizes.
- tensorflow::gtl::ArraySlice<int64> gather_slice_sizes() const;
+ absl::Span<const int64> gather_slice_sizes() const;
// Delegates to HloScatterInstruction::scatter_dimension_numbers().
const ScatterDimensionNumbers& scatter_dimension_numbers() const;
@@ -1531,7 +1528,7 @@ class HloInstruction {
// Removes a list of operands with the given indices in ascending order.
void RemoveOperandsAtAscendingIndices(
- tensorflow::gtl::ArraySlice<int> ascending_indices);
+ absl::Span<const int> ascending_indices);
void AppendComputation(HloComputation* computation) {
called_computations_.push_back(computation);
@@ -1561,8 +1558,7 @@ class HloInstruction {
private:
// Implementation for non-common logic of CloneWithNewOperands.
virtual std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
// TODO(b/80131774): This should be pure virtual.
LOG(FATAL) << "Unimplemented method.";
@@ -1608,7 +1604,7 @@ class HloInstruction {
// Creates an n-ary elementwise operation.
static std::unique_ptr<HloInstruction> CreateNary(
const Shape& shape, HloOpcode opcode,
- tensorflow::gtl::ArraySlice<HloInstruction*> operands);
+ absl::Span<HloInstruction* const> operands);
// Adds a user for this instruction.
void AddUser(HloInstruction* user);
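
A minimal sketch (not part of this change, using a hypothetical Node type as a stand-in for HloInstruction) of why the two Span spellings above differ: pointer operands become absl::Span<T* const>, where the span's pointers are immutable while the pointed-to instructions stay mutable, whereas integer attributes become absl::Span<const int64>. Callers keep passing their existing std::vector or brace-initialized lists unchanged, since both convert implicitly.

#include <cstdint>
#include <vector>

#include "absl/types/span.h"

struct Node {  // stand-in for HloInstruction; not an XLA type
  int64_t id;
};

// The span's elements (the pointers) are const, but the pointed-to Nodes stay
// mutable -- the same spelling the renamed factory methods use for operands.
int64_t SumIds(absl::Span<Node* const> operands) {
  int64_t sum = 0;
  for (Node* n : operands) sum += n->id;
  return sum;
}

// For value element types, the const goes on the element type itself.
int64_t SumDims(absl::Span<const int64_t> dimensions) {
  int64_t sum = 0;
  for (int64_t d : dimensions) sum += d;
  return sum;
}

int main() {
  Node a{1}, b{2};
  std::vector<Node*> operands = {&a, &b};
  // A std::vector<Node*> binds implicitly to Span<Node* const>, so existing
  // call sites need no changes after the rename.
  (void)SumIds(operands);
  // Brace-initialized lists bind to a const span as well.
  (void)SumDims({3, 4, 5});
  return 0;
}
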
diff --git a/tensorflow/compiler/xla/service/hlo_instructions.cc b/tensorflow/compiler/xla/service/hlo_instructions.cc
index e1c884d856..6871953755 100644
--- a/tensorflow/compiler/xla/service/hlo_instructions.cc
+++ b/tensorflow/compiler/xla/service/hlo_instructions.cc
@@ -91,8 +91,7 @@ HloBatchNormTrainingInstruction::HloBatchNormTrainingInstruction(
std::unique_ptr<HloInstruction>
HloBatchNormTrainingInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
CHECK_EQ(new_operands.size(), 3);
return absl::make_unique<HloBatchNormTrainingInstruction>(
@@ -113,8 +112,7 @@ HloBatchNormInferenceInstruction::HloBatchNormInferenceInstruction(
std::unique_ptr<HloInstruction>
HloBatchNormInferenceInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
CHECK_EQ(new_operands.size(), 5);
return absl::make_unique<HloBatchNormInferenceInstruction>(
@@ -135,8 +133,7 @@ HloBatchNormGradInstruction::HloBatchNormGradInstruction(
std::unique_ptr<HloInstruction>
HloBatchNormGradInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
CHECK_EQ(new_operands.size(), 5);
return absl::make_unique<HloBatchNormGradInstruction>(
@@ -144,9 +141,9 @@ HloBatchNormGradInstruction::CloneWithNewOperandsImpl(
new_operands[4], epsilon(), feature_index());
}
-HloFftInstruction::HloFftInstruction(
- const Shape& shape, HloInstruction* operand, FftType fft_type,
- tensorflow::gtl::ArraySlice<int64> fft_length)
+HloFftInstruction::HloFftInstruction(const Shape& shape,
+ HloInstruction* operand, FftType fft_type,
+ absl::Span<const int64> fft_length)
: HloInstruction(HloOpcode::kFft, shape), fft_type_(fft_type) {
fft_length_.assign(fft_length.begin(), fft_length.end());
AppendOperand(operand);
@@ -177,8 +174,7 @@ bool HloFftInstruction::IdenticalSlowPath(
}
std::unique_ptr<HloInstruction> HloFftInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
CHECK_EQ(new_operands.size(), 1);
return absl::make_unique<HloFftInstruction>(shape, new_operands[0], fft_type_,
@@ -232,8 +228,7 @@ HloSendInstruction::HloSendInstruction(HloInstruction* operand,
}
std::unique_ptr<HloInstruction> HloSendInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
CHECK_EQ(new_operands.size(), 2);
return absl::make_unique<HloSendInstruction>(
@@ -250,8 +245,7 @@ HloSendDoneInstruction::HloSendDoneInstruction(HloSendInstruction* operand,
std::unique_ptr<HloInstruction>
HloSendDoneInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
CHECK_EQ(new_operands.size(), 1);
return absl::make_unique<HloSendDoneInstruction>(
@@ -271,8 +265,7 @@ HloRecvInstruction::HloRecvInstruction(const Shape& shape,
}
std::unique_ptr<HloInstruction> HloRecvInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
CHECK_EQ(new_operands.size(), 1);
return absl::make_unique<HloRecvInstruction>(
@@ -293,8 +286,7 @@ HloRecvDoneInstruction::HloRecvDoneInstruction(HloRecvInstruction* operand,
std::unique_ptr<HloInstruction>
HloRecvDoneInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
CHECK_EQ(new_operands.size(), 1);
return absl::make_unique<HloRecvDoneInstruction>(
@@ -303,7 +295,7 @@ HloRecvDoneInstruction::CloneWithNewOperandsImpl(
HloCollectiveInstruction::HloCollectiveInstruction(
HloOpcode opcode, const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> operands,
+ absl::Span<HloInstruction* const> operands,
const std::vector<ReplicaGroup>& replica_groups)
: HloInstruction(opcode, shape), replica_groups_(replica_groups) {
for (auto operand : operands) {
@@ -344,7 +336,7 @@ bool HloCollectiveInstruction::IdenticalSlowPath(
}
HloAllReduceInstruction::HloAllReduceInstruction(
- const Shape& shape, tensorflow::gtl::ArraySlice<HloInstruction*> operands,
+ const Shape& shape, absl::Span<HloInstruction* const> operands,
HloComputation* reduce_computation,
const std::vector<ReplicaGroup>& replica_groups, absl::string_view barrier,
const absl::optional<int64>& all_reduce_id)
@@ -392,8 +384,7 @@ bool HloAllReduceInstruction::IdenticalSlowPath(
std::unique_ptr<HloInstruction>
HloAllReduceInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* /*context*/) const {
return absl::make_unique<HloAllReduceInstruction>(
shape, new_operands, to_apply(), replica_groups(),
@@ -401,15 +392,14 @@ HloAllReduceInstruction::CloneWithNewOperandsImpl(
}
HloAllToAllInstruction::HloAllToAllInstruction(
- const Shape& shape, tensorflow::gtl::ArraySlice<HloInstruction*> operands,
+ const Shape& shape, absl::Span<HloInstruction* const> operands,
const std::vector<ReplicaGroup>& replica_groups)
: HloCollectiveInstruction(HloOpcode::kAllToAll, shape, operands,
replica_groups) {}
std::unique_ptr<HloInstruction>
HloAllToAllInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* /*context*/) const {
return absl::make_unique<HloAllToAllInstruction>(shape, new_operands,
replica_groups());
@@ -459,16 +449,15 @@ bool HloCollectivePermuteInstruction::IdenticalSlowPath(
std::unique_ptr<HloInstruction>
HloCollectivePermuteInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* /*context*/) const {
return absl::make_unique<HloCollectivePermuteInstruction>(
shape, new_operands[0], source_target_pairs());
}
-HloReverseInstruction::HloReverseInstruction(
- const Shape& shape, HloInstruction* operand,
- tensorflow::gtl::ArraySlice<int64> dimensions)
+HloReverseInstruction::HloReverseInstruction(const Shape& shape,
+ HloInstruction* operand,
+ absl::Span<const int64> dimensions)
: HloInstruction(HloOpcode::kReverse, shape),
dimensions_(dimensions.begin(), dimensions.end()) {
AppendOperand(operand);
@@ -496,8 +485,7 @@ bool HloReverseInstruction::IdenticalSlowPath(
}
std::unique_ptr<HloInstruction> HloReverseInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
CHECK_EQ(new_operands.size(), 1);
return absl::make_unique<HloReverseInstruction>(shape, new_operands[0],
@@ -505,7 +493,7 @@ std::unique_ptr<HloInstruction> HloReverseInstruction::CloneWithNewOperandsImpl(
}
HloConcatenateInstruction::HloConcatenateInstruction(
- const Shape& shape, tensorflow::gtl::ArraySlice<HloInstruction*> operands,
+ const Shape& shape, absl::Span<HloInstruction* const> operands,
int64 dimension)
: HloInstruction(HloOpcode::kConcatenate, shape), dimensions_({dimension}) {
for (auto operand : operands) {
@@ -537,16 +525,15 @@ bool HloConcatenateInstruction::IdenticalSlowPath(
std::unique_ptr<HloInstruction>
HloConcatenateInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
return absl::make_unique<HloConcatenateInstruction>(shape, new_operands,
dimensions(0));
}
HloReduceInstruction::HloReduceInstruction(
- const Shape& shape, tensorflow::gtl::ArraySlice<HloInstruction*> args,
- tensorflow::gtl::ArraySlice<int64> dimensions_to_reduce,
+ const Shape& shape, absl::Span<HloInstruction* const> args,
+ absl::Span<const int64> dimensions_to_reduce,
HloComputation* reduce_computation)
: HloInstruction(HloOpcode::kReduce, shape),
dimensions_(dimensions_to_reduce.begin(), dimensions_to_reduce.end()) {
@@ -581,8 +568,7 @@ bool HloReduceInstruction::IdenticalSlowPath(
}
std::unique_ptr<HloInstruction> HloReduceInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
CHECK_EQ(new_operands.size() % 2, 0);
return absl::make_unique<HloReduceInstruction>(shape, new_operands,
@@ -621,8 +607,7 @@ bool HloSortInstruction::IdenticalSlowPath(
}
std::unique_ptr<HloInstruction> HloSortInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
HloInstruction* keys = new_operands[0];
HloInstruction* values = new_operands.size() == 2 ? new_operands[1] : nullptr;
@@ -632,7 +617,7 @@ std::unique_ptr<HloInstruction> HloSortInstruction::CloneWithNewOperandsImpl(
HloTransposeInstruction::HloTransposeInstruction(
const Shape& shape, HloInstruction* operand,
- tensorflow::gtl::ArraySlice<int64> dimensions)
+ absl::Span<const int64> dimensions)
: HloInstruction(HloOpcode::kTranspose, shape),
dimensions_(dimensions.begin(), dimensions.end()) {
CHECK_EQ(shape.dimensions().size(), dimensions.size());
@@ -676,8 +661,7 @@ bool HloTransposeInstruction::IdenticalSlowPath(
std::unique_ptr<HloInstruction>
HloTransposeInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
CHECK_EQ(new_operands.size(), 1);
return absl::make_unique<HloTransposeInstruction>(shape, new_operands[0],
@@ -686,7 +670,7 @@ HloTransposeInstruction::CloneWithNewOperandsImpl(
HloBroadcastInstruction::HloBroadcastInstruction(
const Shape& shape, HloInstruction* operand,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimension)
+ absl::Span<const int64> broadcast_dimension)
: HloInstruction(HloOpcode::kBroadcast, shape),
dimensions_(broadcast_dimension.begin(), broadcast_dimension.end()) {
AppendOperand(operand);
@@ -715,17 +699,16 @@ bool HloBroadcastInstruction::IdenticalSlowPath(
std::unique_ptr<HloInstruction>
HloBroadcastInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
CHECK_EQ(new_operands.size(), 1);
return absl::make_unique<HloBroadcastInstruction>(shape, new_operands[0],
dimensions());
}
-HloMapInstruction::HloMapInstruction(
- const Shape& shape, tensorflow::gtl::ArraySlice<HloInstruction*> operands,
- HloComputation* map_computation)
+HloMapInstruction::HloMapInstruction(const Shape& shape,
+ absl::Span<HloInstruction* const> operands,
+ HloComputation* map_computation)
: HloInstruction(HloOpcode::kMap, shape) {
for (auto operand : operands) {
AppendOperand(operand);
@@ -774,17 +757,16 @@ bool HloMapInstruction::IdenticalSlowPath(
}
std::unique_ptr<HloInstruction> HloMapInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
return absl::make_unique<HloMapInstruction>(shape, new_operands, to_apply());
}
-HloSliceInstruction::HloSliceInstruction(
- const Shape& shape, HloInstruction* operand,
- tensorflow::gtl::ArraySlice<int64> start_indices,
- tensorflow::gtl::ArraySlice<int64> limit_indices,
- tensorflow::gtl::ArraySlice<int64> strides)
+HloSliceInstruction::HloSliceInstruction(const Shape& shape,
+ HloInstruction* operand,
+ absl::Span<const int64> start_indices,
+ absl::Span<const int64> limit_indices,
+ absl::Span<const int64> strides)
: HloInstruction(HloOpcode::kSlice, shape),
slice_starts_(start_indices.begin(), start_indices.end()),
slice_limits_(limit_indices.begin(), limit_indices.end()),
@@ -835,8 +817,7 @@ bool HloSliceInstruction::IdenticalSlowPath(
}
std::unique_ptr<HloInstruction> HloSliceInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
CHECK_EQ(new_operands.size(), 1);
return absl::make_unique<HloSliceInstruction>(
@@ -889,8 +870,7 @@ bool HloConstantInstruction::IdenticalSlowPath(
std::unique_ptr<HloInstruction>
HloConstantInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
return absl::make_unique<HloConstantInstruction>(literal_->CloneToUnique());
}
@@ -947,8 +927,7 @@ bool HloTraceInstruction::IdenticalSlowPath(
}
std::unique_ptr<HloInstruction> HloTraceInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
LOG(FATAL) << "Not yet implemented, clone: " << HloOpcodeString(opcode());
}
@@ -966,7 +945,7 @@ HloFusionInstruction::HloFusionInstruction(const Shape& shape,
HloFusionInstruction::HloFusionInstruction(
const Shape& shape, FusionKind fusion_kind,
- tensorflow::gtl::ArraySlice<HloInstruction*> operands,
+ absl::Span<HloInstruction* const> operands,
HloComputation* fusion_computation)
: HloInstruction(HloOpcode::kFusion, shape), fusion_kind_(fusion_kind) {
for (auto operand : operands) {
@@ -1373,8 +1352,7 @@ bool HloFusionInstruction::IdenticalSlowPath(
}
std::unique_ptr<HloInstruction> HloFusionInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
HloModule* module = context != nullptr ? context->module() : GetModule();
HloComputation* new_fused_computation = nullptr;
@@ -1412,7 +1390,7 @@ Status HloFusionInstruction::DeduplicateFusionOperands() {
HloRngInstruction::HloRngInstruction(
const Shape& shape, RandomDistribution distribution,
- tensorflow::gtl::ArraySlice<HloInstruction*> parameters)
+ absl::Span<HloInstruction* const> parameters)
: HloInstruction(HloOpcode::kRng, shape), distribution_(distribution) {
for (HloInstruction* param : parameters) {
AppendOperand(param);
@@ -1443,8 +1421,7 @@ bool HloRngInstruction::IdenticalSlowPath(
}
std::unique_ptr<HloInstruction> HloRngInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
return absl::make_unique<HloRngInstruction>(shape, distribution_,
new_operands);
@@ -1480,8 +1457,7 @@ bool HloParameterInstruction::IdenticalSlowPath(
std::unique_ptr<HloInstruction>
HloParameterInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
return absl::make_unique<HloParameterInstruction>(parameter_number_, shape,
name());
@@ -1516,8 +1492,7 @@ bool HloGetTupleElementInstruction::IdenticalSlowPath(
std::unique_ptr<HloInstruction>
HloGetTupleElementInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
CHECK_EQ(new_operands.size(), 1);
return absl::make_unique<HloGetTupleElementInstruction>(
@@ -1559,8 +1534,7 @@ bool HloReducePrecisionInstruction::IdenticalSlowPath(
std::unique_ptr<HloInstruction>
HloReducePrecisionInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
CHECK_EQ(new_operands.size(), 1);
return absl::make_unique<HloReducePrecisionInstruction>(
@@ -1600,8 +1574,7 @@ bool HloInfeedInstruction::IdenticalSlowPath(
}
std::unique_ptr<HloInstruction> HloInfeedInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
CHECK_EQ(new_operands.size(), 1);
return absl::make_unique<HloInfeedInstruction>(
@@ -1646,8 +1619,7 @@ bool HloOutfeedInstruction::IdenticalSlowPath(
}
std::unique_ptr<HloInstruction> HloOutfeedInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
CHECK_EQ(new_operands.size(), 2);
return absl::make_unique<HloOutfeedInstruction>(
@@ -1717,8 +1689,7 @@ bool HloConvolutionInstruction::IdenticalSlowPath(
std::unique_ptr<HloInstruction>
HloConvolutionInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
CHECK_EQ(new_operands.size(), 2);
return absl::make_unique<HloConvolutionInstruction>(
@@ -1762,8 +1733,7 @@ bool HloReduceWindowInstruction::IdenticalSlowPath(
std::unique_ptr<HloInstruction>
HloReduceWindowInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
CHECK_EQ(new_operands.size(), 2);
return absl::make_unique<HloReduceWindowInstruction>(
@@ -1811,8 +1781,7 @@ bool HloSelectAndScatterInstruction::IdenticalSlowPath(
std::unique_ptr<HloInstruction>
HloSelectAndScatterInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
CHECK_EQ(new_operands.size(), 3);
return absl::make_unique<HloSelectAndScatterInstruction>(
@@ -1821,7 +1790,7 @@ HloSelectAndScatterInstruction::CloneWithNewOperandsImpl(
}
HloCustomCallInstruction::HloCustomCallInstruction(
- const Shape& shape, tensorflow::gtl::ArraySlice<HloInstruction*> operands,
+ const Shape& shape, absl::Span<HloInstruction* const> operands,
absl::string_view custom_call_target)
: HloInstruction(HloOpcode::kCustomCall, shape),
custom_call_target_(custom_call_target.begin(),
@@ -1887,8 +1856,7 @@ bool HloCustomCallInstruction::IdenticalSlowPath(
std::unique_ptr<HloInstruction>
HloCustomCallInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
auto cloned = absl::make_unique<HloCustomCallInstruction>(
shape, new_operands, custom_call_target());
@@ -1931,8 +1899,7 @@ bool HloPadInstruction::IdenticalSlowPath(
}
std::unique_ptr<HloInstruction> HloPadInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
CHECK_EQ(new_operands.size(), 2);
return absl::make_unique<HloPadInstruction>(shape, new_operands[0],
@@ -1941,7 +1908,7 @@ std::unique_ptr<HloInstruction> HloPadInstruction::CloneWithNewOperandsImpl(
HloDynamicSliceInstruction::HloDynamicSliceInstruction(
const Shape& shape, HloInstruction* operand, HloInstruction* start_indices,
- tensorflow::gtl::ArraySlice<int64> slice_sizes)
+ absl::Span<const int64> slice_sizes)
: HloInstruction(HloOpcode::kDynamicSlice, shape),
dynamic_slice_sizes_(slice_sizes.begin(), slice_sizes.end()) {
AppendOperand(operand);
@@ -1971,8 +1938,7 @@ bool HloDynamicSliceInstruction::IdenticalSlowPath(
std::unique_ptr<HloInstruction>
HloDynamicSliceInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
CHECK_EQ(new_operands.size(), 2);
return absl::make_unique<HloDynamicSliceInstruction>(
@@ -1982,7 +1948,7 @@ HloDynamicSliceInstruction::CloneWithNewOperandsImpl(
HloGatherInstruction::HloGatherInstruction(
const Shape& shape, HloInstruction* operand, HloInstruction* start_indices,
const GatherDimensionNumbers& gather_dim_numbers,
- tensorflow::gtl::ArraySlice<int64> slice_sizes)
+ absl::Span<const int64> slice_sizes)
: HloInstruction(HloOpcode::kGather, shape) {
AppendOperand(operand);
AppendOperand(start_indices);
@@ -2011,10 +1977,9 @@ string HloGatherInstruction::GatherDimensionNumbersToString() const {
}
/* static */ GatherDimensionNumbers HloGatherInstruction::MakeGatherDimNumbers(
- tensorflow::gtl::ArraySlice<int64> offset_dims,
- tensorflow::gtl::ArraySlice<int64> collapsed_slice_dims,
- tensorflow::gtl::ArraySlice<int64> start_index_map,
- int64 index_vector_dim) {
+ absl::Span<const int64> offset_dims,
+ absl::Span<const int64> collapsed_slice_dims,
+ absl::Span<const int64> start_index_map, int64 index_vector_dim) {
GatherDimensionNumbers gather_dim_numbers;
for (int64 output_window_dim : offset_dims) {
gather_dim_numbers.add_offset_dims(output_window_dim);
@@ -2057,8 +2022,7 @@ bool HloGatherInstruction::IdenticalSlowPath(
}
std::unique_ptr<HloInstruction> HloGatherInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
CHECK_EQ(new_operands.size(), 2);
return absl::make_unique<HloGatherInstruction>(
@@ -2102,9 +2066,9 @@ string HloScatterInstruction::ScatterDimensionNumbersToString() const {
/* static */ ScatterDimensionNumbers
HloScatterInstruction::MakeScatterDimNumbers(
- tensorflow::gtl::ArraySlice<int64> update_window_dims,
- tensorflow::gtl::ArraySlice<int64> inserted_window_dims,
- tensorflow::gtl::ArraySlice<int64> scatter_dims_to_operand_dims,
+ absl::Span<const int64> update_window_dims,
+ absl::Span<const int64> inserted_window_dims,
+ absl::Span<const int64> scatter_dims_to_operand_dims,
int64 index_vector_dim) {
ScatterDimensionNumbers scatter_dim_numbers;
for (int64 update_window_dim : update_window_dims) {
@@ -2144,8 +2108,7 @@ bool HloScatterInstruction::IdenticalSlowPath(
}
std::unique_ptr<HloInstruction> HloScatterInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
CHECK_EQ(new_operands.size(), 3);
return absl::make_unique<HloScatterInstruction>(
@@ -2177,8 +2140,7 @@ bool HloIotaInstruction::IdenticalSlowPath(
}
std::unique_ptr<HloInstruction> HloIotaInstruction::CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
return absl::make_unique<HloIotaInstruction>(shape, iota_dimension());
}
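
A minimal sketch (a hypothetical Reverse class, not XLA code) of the ownership pattern these constructors follow after the rename: absl::Span is a non-owning view, so each constructor still copies the span's contents into an owned std::vector member, exactly as fft_length_.assign(fft_length.begin(), fft_length.end()) and dimensions_(dimensions.begin(), dimensions.end()) do above.

#include <cstdint>
#include <vector>

#include "absl/types/span.h"

class Reverse {
 public:
  // The span only views the caller's storage, so copy it into an owned vector.
  explicit Reverse(absl::Span<const int64_t> dimensions)
      : dimensions_(dimensions.begin(), dimensions.end()) {}

  const std::vector<int64_t>& dimensions() const { return dimensions_; }

 private:
  std::vector<int64_t> dimensions_;  // owns the data beyond construction
};

int main() {
  std::vector<int64_t> dims = {0, 2};
  Reverse r(dims);  // the span views 'dims' only during construction
  dims.clear();     // safe: Reverse copied the values it needs
  return r.dimensions().size() == 2 ? 0 : 1;
}
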
diff --git a/tensorflow/compiler/xla/service/hlo_instructions.h b/tensorflow/compiler/xla/service/hlo_instructions.h
index 4fe5144aca..45a648bbe4 100644
--- a/tensorflow/compiler/xla/service/hlo_instructions.h
+++ b/tensorflow/compiler/xla/service/hlo_instructions.h
@@ -67,8 +67,7 @@ class HloBatchNormTrainingInstruction : public HloBatchNormInstruction {
private:
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
};
@@ -82,8 +81,7 @@ class HloBatchNormInferenceInstruction : public HloBatchNormInstruction {
private:
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
};
@@ -97,8 +95,7 @@ class HloBatchNormGradInstruction : public HloBatchNormInstruction {
private:
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
};
@@ -106,7 +103,7 @@ class HloFftInstruction : public HloInstruction {
public:
explicit HloFftInstruction(const Shape& shape, HloInstruction* operand,
FftType fft_type,
- tensorflow::gtl::ArraySlice<int64> fft_length);
+ absl::Span<const int64> fft_length);
FftType fft_type() const { return fft_type_; }
const std::vector<int64>& fft_length() const { return fft_length_; }
@@ -124,8 +121,7 @@ class HloFftInstruction : public HloInstruction {
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
// Describes FFT type for an FFT instruction.
@@ -174,8 +170,7 @@ class HloSendInstruction : public HloSendRecvInstruction {
private:
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
};
@@ -187,8 +182,7 @@ class HloSendDoneInstruction : public HloSendRecvInstruction {
private:
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
};
@@ -200,8 +194,7 @@ class HloRecvInstruction : public HloSendRecvInstruction {
private:
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
};
@@ -213,8 +206,7 @@ class HloRecvDoneInstruction : public HloSendRecvInstruction {
private:
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
};
@@ -227,7 +219,7 @@ class HloCollectiveInstruction : public HloInstruction {
protected:
explicit HloCollectiveInstruction(
HloOpcode opcode, const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> operands,
+ absl::Span<HloInstruction* const> operands,
const std::vector<ReplicaGroup>& replica_groups);
HloInstructionProto ToProto() const override;
@@ -245,7 +237,7 @@ class HloCollectiveInstruction : public HloInstruction {
class HloAllReduceInstruction : public HloCollectiveInstruction {
public:
explicit HloAllReduceInstruction(
- const Shape& shape, tensorflow::gtl::ArraySlice<HloInstruction*> operands,
+ const Shape& shape, absl::Span<HloInstruction* const> operands,
HloComputation* reduce_computation,
const std::vector<ReplicaGroup>& replica_groups,
absl::string_view barrier, const absl::optional<int64>& all_reduce_id);
@@ -274,8 +266,7 @@ class HloAllReduceInstruction : public HloCollectiveInstruction {
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
// The string representation of the barrier config used for CrossReplicaSum.
@@ -290,14 +281,13 @@ class HloAllReduceInstruction : public HloCollectiveInstruction {
class HloAllToAllInstruction : public HloCollectiveInstruction {
public:
explicit HloAllToAllInstruction(
- const Shape& shape, tensorflow::gtl::ArraySlice<HloInstruction*> operands,
+ const Shape& shape, absl::Span<HloInstruction* const> operands,
const std::vector<ReplicaGroup>& replica_groups);
private:
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
};
@@ -324,8 +314,7 @@ class HloCollectivePermuteInstruction : public HloInstruction {
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
const std::vector<std::pair<int64, int64>> source_target_pairs_;
@@ -334,7 +323,7 @@ class HloCollectivePermuteInstruction : public HloInstruction {
class HloReverseInstruction : public HloInstruction {
public:
explicit HloReverseInstruction(const Shape& shape, HloInstruction* operand,
- tensorflow::gtl::ArraySlice<int64> dimensions);
+ absl::Span<const int64> dimensions);
// Returns the dimension sizes or numbers associated with this instruction.
const std::vector<int64>& dimensions() const override { return dimensions_; }
int64 dimensions(int64 index) const override { return dimensions()[index]; }
@@ -350,8 +339,7 @@ class HloReverseInstruction : public HloInstruction {
eq_computations) const override;
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
std::vector<int64> dimensions_;
@@ -359,9 +347,9 @@ class HloReverseInstruction : public HloInstruction {
class HloConcatenateInstruction : public HloInstruction {
public:
- explicit HloConcatenateInstruction(
- const Shape& shape, tensorflow::gtl::ArraySlice<HloInstruction*> operands,
- int64 dimension);
+ explicit HloConcatenateInstruction(const Shape& shape,
+ absl::Span<HloInstruction* const> operands,
+ int64 dimension);
// Returns the dimension sizes or numbers associated with this instruction.
const std::vector<int64>& dimensions() const override { return dimensions_; }
int64 dimensions(int64 index) const override { return dimensions()[index]; }
@@ -379,8 +367,7 @@ class HloConcatenateInstruction : public HloInstruction {
eq_computations) const override;
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
std::vector<int64> dimensions_;
@@ -388,10 +375,10 @@ class HloConcatenateInstruction : public HloInstruction {
class HloReduceInstruction : public HloInstruction {
public:
- explicit HloReduceInstruction(
- const Shape& shape, tensorflow::gtl::ArraySlice<HloInstruction*> args,
- tensorflow::gtl::ArraySlice<int64> dimensions_to_reduce,
- HloComputation* reduce_computation);
+ explicit HloReduceInstruction(const Shape& shape,
+ absl::Span<HloInstruction* const> args,
+ absl::Span<const int64> dimensions_to_reduce,
+ HloComputation* reduce_computation);
// Returns the dimension sizes or numbers associated with this instruction.
const std::vector<int64>& dimensions() const override { return dimensions_; }
int64 dimensions(int64 index) const override { return dimensions()[index]; }
@@ -403,12 +390,12 @@ class HloReduceInstruction : public HloInstruction {
int64 input_count() const { return operand_count() / 2; }
// Returns the input tensors to be reduced.
- tensorflow::gtl::ArraySlice<HloInstruction*> inputs() const {
+ absl::Span<HloInstruction* const> inputs() const {
return absl::MakeSpan(operands()).subspan(0, input_count());
}
// Returns the init values of the reduction.
- tensorflow::gtl::ArraySlice<HloInstruction*> init_values() const {
+ absl::Span<HloInstruction* const> init_values() const {
return absl::MakeSpan(operands()).subspan(input_count(), operand_count());
}
@@ -421,8 +408,7 @@ class HloReduceInstruction : public HloInstruction {
eq_computations) const override;
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
std::vector<int64> dimensions_;
@@ -450,8 +436,7 @@ class HloSortInstruction : public HloInstruction {
eq_computations) const override;
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
std::vector<int64> dimensions_;
@@ -459,9 +444,8 @@ class HloSortInstruction : public HloInstruction {
class HloTransposeInstruction : public HloInstruction {
public:
- explicit HloTransposeInstruction(
- const Shape& shape, HloInstruction* operand,
- tensorflow::gtl::ArraySlice<int64> dimensions);
+ explicit HloTransposeInstruction(const Shape& shape, HloInstruction* operand,
+ absl::Span<const int64> dimensions);
// Returns the dimension sizes or numbers associated with this instruction.
const std::vector<int64>& dimensions() const override { return dimensions_; }
int64 dimensions(int64 index) const override { return dimensions()[index]; }
@@ -479,8 +463,7 @@ class HloTransposeInstruction : public HloInstruction {
eq_computations) const override;
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
std::vector<int64> dimensions_;
@@ -488,9 +471,8 @@ class HloTransposeInstruction : public HloInstruction {
class HloBroadcastInstruction : public HloInstruction {
public:
- explicit HloBroadcastInstruction(
- const Shape& shape, HloInstruction* operand,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimension);
+ explicit HloBroadcastInstruction(const Shape& shape, HloInstruction* operand,
+ absl::Span<const int64> broadcast_dimension);
// Returns the dimension sizes or numbers associated with this instruction.
const std::vector<int64>& dimensions() const override { return dimensions_; }
int64 dimensions(int64 index) const override { return dimensions()[index]; }
@@ -506,8 +488,7 @@ class HloBroadcastInstruction : public HloInstruction {
eq_computations) const override;
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
std::vector<int64> dimensions_;
@@ -515,9 +496,9 @@ class HloBroadcastInstruction : public HloInstruction {
class HloMapInstruction : public HloInstruction {
public:
- explicit HloMapInstruction(
- const Shape& shape, tensorflow::gtl::ArraySlice<HloInstruction*> operands,
- HloComputation* map_computation);
+ explicit HloMapInstruction(const Shape& shape,
+ absl::Span<HloInstruction* const> operands,
+ HloComputation* map_computation);
// Returns the dimension sizes or numbers associated with this instruction.
const std::vector<int64>& dimensions() const override { return dimensions_; }
int64 dimensions(int64 index) const override { return dimensions()[index]; }
@@ -535,8 +516,7 @@ class HloMapInstruction : public HloInstruction {
eq_computations) const override;
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
std::vector<int64> dimensions_;
@@ -545,9 +525,9 @@ class HloMapInstruction : public HloInstruction {
class HloSliceInstruction : public HloInstruction {
public:
explicit HloSliceInstruction(const Shape& shape, HloInstruction* operand,
- tensorflow::gtl::ArraySlice<int64> start_indices,
- tensorflow::gtl::ArraySlice<int64> limit_indices,
- tensorflow::gtl::ArraySlice<int64> strides);
+ absl::Span<const int64> start_indices,
+ absl::Span<const int64> limit_indices,
+ absl::Span<const int64> strides);
HloInstructionProto ToProto() const override;
@@ -586,8 +566,7 @@ class HloSliceInstruction : public HloInstruction {
eq_computations) const override;
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
// Describes the [begin, end) index range for a slice.
@@ -629,8 +608,7 @@ class HloConstantInstruction : public HloInstruction {
CanonicalNameMap* canonical_name_map) const override;
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
// TODO(b/36360764): Remove unique_ptr wrapping.
std::unique_ptr<Literal> literal_;
@@ -651,8 +629,7 @@ class HloTraceInstruction : public HloInstruction {
eq_computations) const override;
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
// TODO(b/36360764): Remove unique_ptr wrapping.
std::unique_ptr<Literal> literal_;
@@ -663,10 +640,9 @@ class HloFusionInstruction : public HloInstruction {
explicit HloFusionInstruction(const Shape& shape, FusionKind fusion_kind,
HloInstruction* fused_root);
- explicit HloFusionInstruction(
- const Shape& shape, FusionKind fusion_kind,
- tensorflow::gtl::ArraySlice<HloInstruction*> operands,
- HloComputation* fusion_computation);
+ explicit HloFusionInstruction(const Shape& shape, FusionKind fusion_kind,
+ absl::Span<HloInstruction* const> operands,
+ HloComputation* fusion_computation);
string ToCategory() const override;
// Returns a serialized representation of this instruction.
@@ -779,8 +755,7 @@ class HloFusionInstruction : public HloInstruction {
eq_computations) const override;
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
// The type of the fusion. Used by kFusion only.
@@ -789,9 +764,9 @@ class HloFusionInstruction : public HloInstruction {
class HloRngInstruction : public HloInstruction {
public:
- explicit HloRngInstruction(
- const Shape& shape, RandomDistribution distribution,
- tensorflow::gtl::ArraySlice<HloInstruction*> parameters);
+ explicit HloRngInstruction(const Shape& shape,
+ RandomDistribution distribution,
+ absl::Span<HloInstruction* const> parameters);
// Returns the random distribution for this rng node.
RandomDistribution random_distribution() const { return distribution_; }
// Returns a serialized representation of this instruction.
@@ -808,8 +783,7 @@ class HloRngInstruction : public HloInstruction {
eq_computations) const override;
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
// The distribution requested for random number generation.
@@ -834,8 +808,7 @@ class HloParameterInstruction : public HloInstruction {
CanonicalNameMap* canonical_name_map) const override;
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
int64 parameter_number_ = 0;
@@ -859,8 +832,7 @@ class HloGetTupleElementInstruction : public HloInstruction {
eq_computations) const override;
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
int64 tuple_index_ = -1;
@@ -888,8 +860,7 @@ class HloReducePrecisionInstruction : public HloInstruction {
eq_computations) const override;
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
// The bit sizes for a reduce-precision operation.
@@ -926,8 +897,7 @@ class HloInfeedInstruction : public HloInstruction {
eq_computations) const override;
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
// The string representation of the infeed configuration.
@@ -959,8 +929,7 @@ class HloOutfeedInstruction : public HloInstruction {
eq_computations) const override;
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
// Shape of outfeed request.
@@ -1001,8 +970,7 @@ class HloConvolutionInstruction : public HloInstruction {
eq_computations) const override;
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
Window window_;
// Describes the dimension numbers used for a convolution.
@@ -1033,8 +1001,7 @@ class HloReduceWindowInstruction : public HloInstruction {
eq_computations) const override;
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
Window window_;
};
@@ -1082,17 +1049,16 @@ class HloSelectAndScatterInstruction : public HloInstruction {
eq_computations) const override;
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
Window window_;
};
class HloCustomCallInstruction : public HloInstruction {
public:
- explicit HloCustomCallInstruction(
- const Shape& shape, tensorflow::gtl::ArraySlice<HloInstruction*> operands,
- absl::string_view custom_call_target);
+ explicit HloCustomCallInstruction(const Shape& shape,
+ absl::Span<HloInstruction* const> operands,
+ absl::string_view custom_call_target);
const Window& window() const override {
CHECK(window_ != nullptr);
return *window_;
@@ -1125,8 +1091,7 @@ class HloCustomCallInstruction : public HloInstruction {
eq_computations) const override;
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
// Name of a global symbol to call, only present for kCustomCall.
string custom_call_target_;
@@ -1155,8 +1120,7 @@ class HloPadInstruction : public HloInstruction {
eq_computations) const override;
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
// The padding configuration that describes the edge padding and interior
@@ -1166,10 +1130,10 @@ class HloPadInstruction : public HloInstruction {
class HloDynamicSliceInstruction : public HloInstruction {
public:
- explicit HloDynamicSliceInstruction(
- const Shape& shape, HloInstruction* operand,
- HloInstruction* start_indices,
- tensorflow::gtl::ArraySlice<int64> slice_sizes);
+ explicit HloDynamicSliceInstruction(const Shape& shape,
+ HloInstruction* operand,
+ HloInstruction* start_indices,
+ absl::Span<const int64> slice_sizes);
// Old methods kept for smooth subclassing transition END.
// Returns the size of the slice in the given dimension for a dynamic
// slice node.
@@ -1191,8 +1155,7 @@ class HloDynamicSliceInstruction : public HloInstruction {
eq_computations) const override;
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
// Describes the [start, start + size) range size for a dynamic slice
@@ -1206,12 +1169,12 @@ class HloGatherInstruction : public HloInstruction {
const Shape& shape, HloInstruction* operand,
HloInstruction* start_indices,
const GatherDimensionNumbers& gather_dim_numbers,
- tensorflow::gtl::ArraySlice<int64> slice_sizes);
+ absl::Span<const int64> slice_sizes);
const GatherDimensionNumbers& gather_dimension_numbers() const {
CHECK(gather_dimension_numbers_ != nullptr);
return *gather_dimension_numbers_;
}
- tensorflow::gtl::ArraySlice<int64> gather_slice_sizes() const {
+ absl::Span<const int64> gather_slice_sizes() const {
return gather_slice_sizes_;
}
// Returns the dump string of the gather dimension numbers.
@@ -1221,10 +1184,9 @@ class HloGatherInstruction : public HloInstruction {
// Creates an instance of GatherDimensionNumbers.
static GatherDimensionNumbers MakeGatherDimNumbers(
- tensorflow::gtl::ArraySlice<int64> offset_dims,
- tensorflow::gtl::ArraySlice<int64> collapsed_slice_dims,
- tensorflow::gtl::ArraySlice<int64> start_index_map,
- int64 index_vector_dim);
+ absl::Span<const int64> offset_dims,
+ absl::Span<const int64> collapsed_slice_dims,
+ absl::Span<const int64> start_index_map, int64 index_vector_dim);
private:
std::vector<string> ExtraAttributesToStringImpl(
@@ -1234,8 +1196,7 @@ class HloGatherInstruction : public HloInstruction {
const std::function<bool(const HloComputation*, const HloComputation*)>&
eq_computations) const override;
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
std::unique_ptr<GatherDimensionNumbers> gather_dimension_numbers_;
@@ -1260,9 +1221,9 @@ class HloScatterInstruction : public HloInstruction {
// Creates an instance of ScatterDimensionNumbers.
static ScatterDimensionNumbers MakeScatterDimNumbers(
- tensorflow::gtl::ArraySlice<int64> update_window_dims,
- tensorflow::gtl::ArraySlice<int64> inserted_window_dims,
- tensorflow::gtl::ArraySlice<int64> scatter_dims_to_operand_dims,
+ absl::Span<const int64> update_window_dims,
+ absl::Span<const int64> inserted_window_dims,
+ absl::Span<const int64> scatter_dims_to_operand_dims,
int64 index_vector_dim);
private:
@@ -1274,8 +1235,7 @@ class HloScatterInstruction : public HloInstruction {
eq_computations) const override;
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
std::unique_ptr<ScatterDimensionNumbers> scatter_dimension_numbers_;
@@ -1298,8 +1258,7 @@ class HloIotaInstruction : public HloInstruction {
eq_computations) const override;
// Implementation for non-common logic of CloneWithNewOperands.
std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
- const Shape& shape,
- tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const override;
const int64 iota_dimension_;
diff --git a/tensorflow/compiler/xla/service/hlo_module.cc b/tensorflow/compiler/xla/service/hlo_module.cc
index 78167335c8..3a1bc4e328 100644
--- a/tensorflow/compiler/xla/service/hlo_module.cc
+++ b/tensorflow/compiler/xla/service/hlo_module.cc
@@ -353,7 +353,7 @@ bool IsUsedOutsideSubcomputation(
} // anonymous namespace
HloInstruction* HloModule::OutlineExpressionFromComputation(
- tensorflow::gtl::ArraySlice<HloInstruction*> instructions_to_outline,
+ absl::Span<HloInstruction* const> instructions_to_outline,
const string& outlined_computation_name, HloComputation* computation) {
auto builder = HloComputation::Builder(outlined_computation_name);
diff --git a/tensorflow/compiler/xla/service/hlo_module.h b/tensorflow/compiler/xla/service/hlo_module.h
index cf129b835d..ee5601beec 100644
--- a/tensorflow/compiler/xla/service/hlo_module.h
+++ b/tensorflow/compiler/xla/service/hlo_module.h
@@ -192,7 +192,7 @@ class HloModule {
// order (root of outlined instructions last). TODO(jingyue): take a set of
// instructions and topologically sort them.
HloInstruction* OutlineExpressionFromComputation(
- tensorflow::gtl::ArraySlice<HloInstruction*> instructions_to_outline,
+ absl::Span<HloInstruction* const> instructions_to_outline,
const string& outlined_computation_name, HloComputation* computation);
// Returns a randomly generated uint64.
diff --git a/tensorflow/compiler/xla/service/hlo_module_group_util.cc b/tensorflow/compiler/xla/service/hlo_module_group_util.cc
index d70328c8a3..d83ee71490 100644
--- a/tensorflow/compiler/xla/service/hlo_module_group_util.cc
+++ b/tensorflow/compiler/xla/service/hlo_module_group_util.cc
@@ -193,7 +193,7 @@ std::vector<HloInstruction*> HloModuleGroupUtil::GlobalSuccessors(
}
std::vector<HloInstruction*> HloModuleGroupUtil::RootInstructions(
- tensorflow::gtl::ArraySlice<HloComputation*> computations) {
+ absl::Span<HloComputation* const> computations) {
std::vector<HloInstruction*> roots;
for (HloComputation* computation : computations) {
for (HloInstruction* instruction : computation->instructions()) {
@@ -293,7 +293,7 @@ Status HloModuleGroupUtil::VisitTopologicalOrder(
}
Status HloModuleGroupUtil::VerifyComputations(
- tensorflow::gtl::ArraySlice<HloComputation*> computations) {
+ absl::Span<HloComputation* const> computations) {
auto visit_function =
[&](HloInstruction* instruction,
const std::vector<HloInstruction*>& instruction_group) {
@@ -324,7 +324,7 @@ Status HloModuleGroupUtil::VerifyComputations(
StatusOr<std::unique_ptr<HloReachabilityMap>>
HloModuleGroupUtil::ComputeReachability(
- tensorflow::gtl::ArraySlice<HloComputation*> computations) {
+ absl::Span<HloComputation* const> computations) {
std::vector<HloInstruction*> post_order;
auto visit_function =
[&](HloInstruction* instruction,
diff --git a/tensorflow/compiler/xla/service/hlo_module_group_util.h b/tensorflow/compiler/xla/service/hlo_module_group_util.h
index c25ca1aff5..fe11fe1818 100644
--- a/tensorflow/compiler/xla/service/hlo_module_group_util.h
+++ b/tensorflow/compiler/xla/service/hlo_module_group_util.h
@@ -56,7 +56,7 @@ class HloModuleGroupUtil {
// Returns the root instructions of the computations.
std::vector<HloInstruction*> RootInstructions(
- tensorflow::gtl::ArraySlice<HloComputation*> computations);
+ absl::Span<HloComputation* const> computations);
// Visit state of each instruction during DFS traversal.
enum VisitState {
@@ -93,15 +93,14 @@ class HloModuleGroupUtil {
HloInstruction* root);
// Verifies that the computations are well-formed (e.g., no cycles).
- Status VerifyComputations(
- tensorflow::gtl::ArraySlice<HloComputation*> computations);
+ Status VerifyComputations(absl::Span<HloComputation* const> computations);
// Below Reachability utils resemble those in HloComputation, except that
// they can handle instructions across multiple computations.
//
// Creates the reachability map for the instructions in the computations.
StatusOr<std::unique_ptr<HloReachabilityMap>> ComputeReachability(
- tensorflow::gtl::ArraySlice<HloComputation*> computations);
+ absl::Span<HloComputation* const> computations);
// Updates the reachability of the given instruction, taking the global
// predecessors and successors into account.
diff --git a/tensorflow/compiler/xla/service/hlo_module_test.cc b/tensorflow/compiler/xla/service/hlo_module_test.cc
index 209ad5e58c..80009c7f7e 100644
--- a/tensorflow/compiler/xla/service/hlo_module_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_module_test.cc
@@ -44,7 +44,7 @@ class HloModuleTest : public HloTestBase {
// Creates a computation which calls the given zero-parameter computations.
std::unique_ptr<HloComputation> CreateCallComputation(
- tensorflow::gtl::ArraySlice<HloComputation*> computations) {
+ absl::Span<HloComputation* const> computations) {
auto builder = HloComputation::Builder("Call");
for (auto computation : computations) {
builder.AddInstruction(
diff --git a/tensorflow/compiler/xla/service/hlo_parser.cc b/tensorflow/compiler/xla/service/hlo_parser.cc
index b93e4f24f6..02201d4542 100644
--- a/tensorflow/compiler/xla/service/hlo_parser.cc
+++ b/tensorflow/compiler/xla/service/hlo_parser.cc
@@ -306,7 +306,7 @@ bool SplitToInt64s(absl::string_view s, char delim, std::vector<int64>* out) {
// Creates replica groups from the provided nested array. groups[i] represents
// the replica ids for group 'i'.
std::vector<ReplicaGroup> CreateReplicaGroups(
- tensorflow::gtl::ArraySlice<std::vector<int64>> groups) {
+ absl::Span<const std::vector<int64>> groups) {
std::vector<ReplicaGroup> replica_groups;
absl::c_transform(groups, std::back_inserter(replica_groups),
[](const std::vector<int64>& ids) {
@@ -997,10 +997,10 @@ bool HloParser::ParseInstruction(HloComputation::Builder* builder,
}
instruction = builder->AddInstruction(HloInstruction::CreateReduce(
shape, /*operands=*/
- tensorflow::gtl::ArraySlice<HloInstruction*>(operands).subspan(
+ absl::Span<HloInstruction* const>(operands).subspan(
0, operands.size() / 2),
/*init_values=*/
- tensorflow::gtl::ArraySlice<HloInstruction*>(operands).subspan(
+ absl::Span<HloInstruction* const>(operands).subspan(
operands.size() / 2, operands.size()),
*dimensions_to_reduce, *reduce_computation));
break;
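The parser hunk above splits one flat operand vector into operands and init values with absl::Span::subspan, the Span counterpart of ArraySlice's subspan. A self-contained sketch of the same slicing, using plain ints instead of HloInstruction pointers:

#include <vector>

#include "absl/types/span.h"

void SplitOperands(const std::vector<int>& operands) {
  absl::Span<const int> all(operands);
  // First half feeds the reduce operands, second half the init values;
  // subspan with a single argument runs to the end of the view.
  absl::Span<const int> inputs = all.subspan(0, operands.size() / 2);
  absl::Span<const int> init_values = all.subspan(operands.size() / 2);
  (void)inputs;
  (void)init_values;
}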
diff --git a/tensorflow/compiler/xla/service/hlo_reachability.cc b/tensorflow/compiler/xla/service/hlo_reachability.cc
index 01b088a957..961930f0a8 100644
--- a/tensorflow/compiler/xla/service/hlo_reachability.cc
+++ b/tensorflow/compiler/xla/service/hlo_reachability.cc
@@ -18,7 +18,7 @@ limitations under the License.
namespace xla {
HloReachabilityMap::HloReachabilityMap(
- tensorflow::gtl::ArraySlice<const HloInstruction*> instructions)
+ absl::Span<const HloInstruction* const> instructions)
: size_(instructions.size()) {
bit_vectors_.reserve(size_);
for (const HloInstruction* hlo : instructions) {
@@ -29,7 +29,7 @@ HloReachabilityMap::HloReachabilityMap(
}
bool HloReachabilityMap::SetReachabilityToUnion(
- tensorflow::gtl::ArraySlice<const HloInstruction*> inputs,
+ absl::Span<const HloInstruction* const> inputs,
const HloInstruction* instruction) {
BitVector& bit_vector = GetBitVector(instruction);
tmp_bit_vector_ = bit_vector;
@@ -38,13 +38,13 @@ bool HloReachabilityMap::SetReachabilityToUnion(
}
void HloReachabilityMap::FastSetReachabilityToUnion(
- tensorflow::gtl::ArraySlice<const HloInstruction*> inputs,
+ absl::Span<const HloInstruction* const> inputs,
const HloInstruction* instruction) {
SetReachabilityToUnionHelper(inputs, instruction, &GetBitVector(instruction));
}
void HloReachabilityMap::SetReachabilityToUnionHelper(
- tensorflow::gtl::ArraySlice<const HloInstruction*> inputs,
+ absl::Span<const HloInstruction* const> inputs,
const HloInstruction* instruction, BitVector* bit_vector) {
// If instruction is part of inputs, don't reset the bit_vector.
if (std::find(inputs.begin(), inputs.end(), instruction) == inputs.end()) {
diff --git a/tensorflow/compiler/xla/service/hlo_reachability.h b/tensorflow/compiler/xla/service/hlo_reachability.h
index 48215d32a8..2c8ebc8e6c 100644
--- a/tensorflow/compiler/xla/service/hlo_reachability.h
+++ b/tensorflow/compiler/xla/service/hlo_reachability.h
@@ -42,7 +42,7 @@ class HloReachabilityMap {
// Sets up a graph with no edges and where the nodes correspond to the given
// instructions.
explicit HloReachabilityMap(
- tensorflow::gtl::ArraySlice<const HloInstruction*> instructions);
+ absl::Span<const HloInstruction* const> instructions);
// Set the reachability set of 'instruction' to the union of the reachability
// sets of 'inputs'. Upon return, IsReachable(x, instruction) where
@@ -54,13 +54,12 @@ class HloReachabilityMap {
// vector in the internal graph of this HloReachabilityMap for the given
// instruction and does not transitively update any other part of the
// adjacency matrix.
- bool SetReachabilityToUnion(
- tensorflow::gtl::ArraySlice<const HloInstruction*> inputs,
- const HloInstruction* instruction);
+ bool SetReachabilityToUnion(absl::Span<const HloInstruction* const> inputs,
+ const HloInstruction* instruction);
// As above, but faster because it does not check if the reachability changed.
void FastSetReachabilityToUnion(
- tensorflow::gtl::ArraySlice<const HloInstruction*> inputs,
+ absl::Span<const HloInstruction* const> inputs,
const HloInstruction* instruction);
// Sets entry so that IsReachable(a, b) will return true
@@ -141,7 +140,7 @@ class HloReachabilityMap {
// Helper for SetReachabilityToUnion/FastSetReachabilityToUnion.
void SetReachabilityToUnionHelper(
- tensorflow::gtl::ArraySlice<const HloInstruction*> inputs,
+ absl::Span<const HloInstruction* const> inputs,
const HloInstruction* instruction, BitVector* bit_vector);
// Return the index of the given instruction. The value is used to index into
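The reachability signatures above read absl::Span<const HloInstruction* const>: the element type is a pointer-to-const instruction, and the trailing const makes the span's own elements read-only, matching the guarantee ArraySlice<const HloInstruction*> gave. A small sketch of how such a span is consumed and constructed:

#include <vector>

#include "absl/types/span.h"

class HloInstruction;  // forward declaration; sketch only

// Neither the pointees nor the pointer slots can be modified here.
void VisitInputs(absl::Span<const HloInstruction* const> inputs) {
  for (const HloInstruction* input : inputs) {
    (void)input;
  }
}

void Example(const std::vector<const HloInstruction*>& inputs) {
  VisitInputs(inputs);  // implicit conversion from the vector
}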
diff --git a/tensorflow/compiler/xla/service/hlo_rematerialization.cc b/tensorflow/compiler/xla/service/hlo_rematerialization.cc
index 569d2e5d2d..c9629926ea 100644
--- a/tensorflow/compiler/xla/service/hlo_rematerialization.cc
+++ b/tensorflow/compiler/xla/service/hlo_rematerialization.cc
@@ -202,8 +202,8 @@ class InstructionList {
// On object construction this ordinal is precisely the instruction's index
// in the list. Later, instructions inserted via InsertBefore receive
// duplicate values. However, monotonicity is preserved.
- void InsertBeforeInstructions(
- Item* to_insert, tensorflow::gtl::ArraySlice<Item*> before_instructions) {
+ void InsertBeforeInstructions(Item* to_insert,
+ absl::Span<Item* const> before_instructions) {
VLOG(3) << "InsertBeforeInstructions: " << to_insert->instruction->name()
<< " before {"
<< absl::StrJoin(before_instructions, ", ",
diff --git a/tensorflow/compiler/xla/service/hlo_runner.cc b/tensorflow/compiler/xla/service/hlo_runner.cc
index 7bd8a4a544..66ac1f66fd 100644
--- a/tensorflow/compiler/xla/service/hlo_runner.cc
+++ b/tensorflow/compiler/xla/service/hlo_runner.cc
@@ -106,7 +106,7 @@ StatusOr<ScopedShapedBuffer> HloRunner::TransferLiteralToDevice(
}
StatusOr<std::vector<ScopedShapedBuffer>> HloRunner::TransferLiteralsToDevice(
- const tensorflow::gtl::ArraySlice<const Literal*> literals) {
+ const absl::Span<const Literal* const> literals) {
std::vector<ScopedShapedBuffer> buffers;
for (const Literal* literal : literals) {
CHECK(literal != nullptr);
@@ -118,7 +118,7 @@ StatusOr<std::vector<ScopedShapedBuffer>> HloRunner::TransferLiteralsToDevice(
}
StatusOr<std::vector<ScopedShapedBuffer>> HloRunner::TransferLiteralsToDevice(
- const tensorflow::gtl::ArraySlice<std::unique_ptr<Literal>> literals) {
+ const absl::Span<const std::unique_ptr<Literal>> literals) {
std::vector<const Literal*> literal_pointers;
literal_pointers.reserve(literals.size());
for (const auto& literal : literals) {
@@ -137,8 +137,8 @@ StatusOr<std::unique_ptr<Literal>> HloRunner::TransferLiteralFromDevice(
StatusOr<std::unique_ptr<Literal>> HloRunner::Execute(
std::unique_ptr<HloModule> module,
- const tensorflow::gtl::ArraySlice<const Literal*> arguments,
- bool run_hlo_passes, ExecutionProfile* profile) {
+ const absl::Span<const Literal* const> arguments, bool run_hlo_passes,
+ ExecutionProfile* profile) {
TF_ASSIGN_OR_RETURN(std::vector<ScopedShapedBuffer> argument_buffers,
TransferLiteralsToDevice(arguments));
TF_ASSIGN_OR_RETURN(ScopedShapedBuffer result,
@@ -152,7 +152,7 @@ StatusOr<std::unique_ptr<Literal>> HloRunner::Execute(
StatusOr<std::unique_ptr<Literal>> HloRunner::Execute(
std::unique_ptr<HloModule> module,
- const tensorflow::gtl::ArraySlice<std::unique_ptr<Literal>> arguments,
+ const absl::Span<const std::unique_ptr<Literal>> arguments,
bool run_hlo_passes, ExecutionProfile* profile) {
// Construct a vector of plain pointers for the arguments.
std::vector<const Literal*> argument_pointers;
@@ -169,8 +169,8 @@ StatusOr<std::unique_ptr<Literal>> HloRunner::Execute(
StatusOr<ScopedShapedBuffer> HloRunner::ExecuteWithDeviceBuffers(
std::unique_ptr<HloModule> module,
- const tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments,
- bool run_hlo_passes, ExecutionProfile* profile) {
+ const absl::Span<const ShapedBuffer* const> arguments, bool run_hlo_passes,
+ ExecutionProfile* profile) {
// Get service run options.
se::Stream stream(backend().default_stream_executor());
stream.Init();
@@ -190,8 +190,8 @@ StatusOr<ScopedShapedBuffer> HloRunner::ExecuteWithDeviceBuffers(
StatusOr<ScopedShapedBuffer> HloRunner::ExecuteWithDeviceBuffers(
std::unique_ptr<HloModule> module,
- const tensorflow::gtl::ArraySlice<ScopedShapedBuffer> arguments,
- bool run_hlo_passes, ExecutionProfile* profile) {
+ const absl::Span<const ScopedShapedBuffer> arguments, bool run_hlo_passes,
+ ExecutionProfile* profile) {
std::vector<const ShapedBuffer*> argument_pointers;
argument_pointers.reserve(arguments.size());
for (const auto& argument : arguments) {
@@ -226,8 +226,7 @@ StatusOr<std::vector<std::unique_ptr<Literal>>> HloRunner::ExecuteReplicated(
// no arguments.
std::vector<const ShapedBuffer*> argument_buffer_ptrs(
options.num_replicas * options.arguments.size() + 1);
- std::vector<tensorflow::gtl::ArraySlice<const ShapedBuffer*>>
- argument_buffer_slices;
+ std::vector<absl::Span<const ShapedBuffer* const>> argument_buffer_slices;
int64 index = 0;
for (int64 i = 0; i < options.num_replicas; ++i) {
int64 device = device_assignment(i, 0);
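argument_buffer_slices above is a vector of spans, and each span only views a slice of argument_buffer_ptrs; that backing vector is sized up front, so it neither reallocates nor goes out of scope while the spans are in use. A sketch of the same pattern with stand-in types (ShapedBuffer here is only a placeholder struct):

#include <vector>

#include "absl/types/span.h"

struct ShapedBuffer {};  // stand-in for xla::ShapedBuffer; sketch only

void Example(int num_replicas, int args_per_replica) {
  // Backing storage allocated once so the spans below never dangle.
  std::vector<const ShapedBuffer*> all_ptrs(num_replicas * args_per_replica);
  std::vector<absl::Span<const ShapedBuffer* const>> per_replica;
  for (int i = 0; i < num_replicas; ++i) {
    per_replica.push_back(absl::MakeConstSpan(all_ptrs).subspan(
        i * args_per_replica, args_per_replica));
  }
  (void)per_replica;
}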
diff --git a/tensorflow/compiler/xla/service/hlo_runner.h b/tensorflow/compiler/xla/service/hlo_runner.h
index cfc519063e..547b5fc1bb 100644
--- a/tensorflow/compiler/xla/service/hlo_runner.h
+++ b/tensorflow/compiler/xla/service/hlo_runner.h
@@ -104,9 +104,9 @@ class HloRunner {
// Transfers data between the host and device.
StatusOr<ScopedShapedBuffer> TransferLiteralToDevice(const Literal& literal);
StatusOr<std::vector<ScopedShapedBuffer>> TransferLiteralsToDevice(
- const tensorflow::gtl::ArraySlice<const Literal*> literals);
+ const absl::Span<const Literal* const> literals);
StatusOr<std::vector<ScopedShapedBuffer>> TransferLiteralsToDevice(
- const tensorflow::gtl::ArraySlice<std::unique_ptr<Literal>> literals);
+ const absl::Span<const std::unique_ptr<Literal>> literals);
StatusOr<std::unique_ptr<Literal>> TransferLiteralFromDevice(
const ShapedBuffer& buffer);
@@ -117,24 +117,24 @@ class HloRunner {
// optimization.
StatusOr<std::unique_ptr<Literal>> Execute(
std::unique_ptr<HloModule> module,
- const tensorflow::gtl::ArraySlice<const Literal*> arguments,
+ const absl::Span<const Literal* const> arguments,
bool run_hlo_passes = true, ExecutionProfile* profile = nullptr);
StatusOr<std::unique_ptr<Literal>> Execute(
std::unique_ptr<HloModule> module,
- const tensorflow::gtl::ArraySlice<std::unique_ptr<Literal>> arguments,
+ const absl::Span<const std::unique_ptr<Literal>> arguments,
bool run_hlo_passes = true, ExecutionProfile* profile = nullptr);
// As Execute(), but accepts and returns device buffers instead of host
// buffers.
StatusOr<ScopedShapedBuffer> ExecuteWithDeviceBuffers(
std::unique_ptr<HloModule> module,
- const tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments,
+ const absl::Span<const ShapedBuffer* const> arguments,
bool run_hlo_passes = true, ExecutionProfile* profile = nullptr);
StatusOr<ScopedShapedBuffer> ExecuteWithDeviceBuffers(
std::unique_ptr<HloModule> module,
- const tensorflow::gtl::ArraySlice<ScopedShapedBuffer> arguments,
+ const absl::Span<const ScopedShapedBuffer> arguments,
bool run_hlo_passes = true, ExecutionProfile* profile = nullptr);
// Executes a given HLO module into a set of replicas, and returns a map
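The Execute overloads above also accept absl::Span<const std::unique_ptr<Literal>>: the span borrows the unique_ptr elements without affecting ownership, so a std::vector<std::unique_ptr<Literal>> can be passed as-is. A minimal sketch with a placeholder Literal type:

#include <memory>
#include <vector>

#include "absl/types/span.h"

struct Literal {};  // stand-in for xla::Literal; sketch only

// Borrows the elements; ownership stays with the caller's vector.
int CountPresent(absl::Span<const std::unique_ptr<Literal>> literals) {
  int count = 0;
  for (const auto& literal : literals) {
    if (literal != nullptr) ++count;
  }
  return count;
}

void Example() {
  std::vector<std::unique_ptr<Literal>> arguments;
  arguments.push_back(std::make_unique<Literal>());
  int present = CountPresent(arguments);  // implicit view, no pointer copies
  (void)present;
}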
diff --git a/tensorflow/compiler/xla/service/hlo_scheduling_test.cc b/tensorflow/compiler/xla/service/hlo_scheduling_test.cc
index 930801288a..d49d09d459 100644
--- a/tensorflow/compiler/xla/service/hlo_scheduling_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_scheduling_test.cc
@@ -269,7 +269,7 @@ TEST_F(HloSchedulingTest, TuplesAreAccountedCorrectly) {
auto abs_abs1 = builder.AddInstruction(
HloInstruction::CreateUnary(r1f32, HloOpcode::kAbs, abs_const));
auto tuple = builder.AddInstruction(HloInstruction::CreateTuple(
- tensorflow::gtl::ArraySlice<HloInstruction*>({abs_abs1})));
+ absl::Span<HloInstruction* const>({abs_abs1})));
auto tuple_elm = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(r1f32, tuple, 0));
diff --git a/tensorflow/compiler/xla/service/hlo_sharding.cc b/tensorflow/compiler/xla/service/hlo_sharding.cc
index 1235259764..de7e6b53d4 100644
--- a/tensorflow/compiler/xla/service/hlo_sharding.cc
+++ b/tensorflow/compiler/xla/service/hlo_sharding.cc
@@ -54,9 +54,8 @@ HloSharding HloSharding::Tuple(const ShapeTree<HloSharding>& sub_shardings) {
return HloSharding(flattened_list);
}
-HloSharding HloSharding::Tuple(
- const Shape& tuple_shape,
- tensorflow::gtl::ArraySlice<HloSharding> shardings) {
+HloSharding HloSharding::Tuple(const Shape& tuple_shape,
+ absl::Span<const HloSharding> shardings) {
CHECK(ShapeUtil::IsTuple(tuple_shape)) << ShapeUtil::HumanString(tuple_shape);
for (auto& sharding : shardings) {
CHECK(!sharding.IsTuple()) << sharding.ToString();
@@ -142,7 +141,7 @@ std::vector<int64> HloSharding::TileIndexForDevice(int64 device) const {
CHECK(!maximal_);
CHECK(!IsTuple());
std::vector<int64> ret_index;
- tile_assignment_.Each([&](tensorflow::gtl::ArraySlice<int64> index, int64 d) {
+ tile_assignment_.Each([&](absl::Span<const int64> index, int64 d) {
if (d == device) {
ret_index = {index.begin(), index.end()};
}
@@ -151,8 +150,7 @@ std::vector<int64> HloSharding::TileIndexForDevice(int64 device) const {
return ret_index;
}
-int64 HloSharding::DeviceForTileIndex(
- tensorflow::gtl::ArraySlice<int64> index) const {
+int64 HloSharding::DeviceForTileIndex(absl::Span<const int64> index) const {
CHECK(!replicated_);
CHECK(!IsTuple());
if (maximal_) {
@@ -319,7 +317,7 @@ Status HloSharding::ValidateNonTuple(const Shape& shape,
Status status = Status::OK();
std::set<int64> seen_cores;
tile_assignment_.Each(
- [&](tensorflow::gtl::ArraySlice<int64> indices, int32 core) {
+ [&](absl::Span<const int64> indices, int32 core) {
// Don't overwrite a bad status, so we report the first error.
if (status.ok()) {
if (core >= num_devices) {
diff --git a/tensorflow/compiler/xla/service/hlo_sharding.h b/tensorflow/compiler/xla/service/hlo_sharding.h
index be51c3f55b..01fd9f215d 100644
--- a/tensorflow/compiler/xla/service/hlo_sharding.h
+++ b/tensorflow/compiler/xla/service/hlo_sharding.h
@@ -66,7 +66,7 @@ class HloSharding {
// shardings must match the number of leaf nodes in tuple_shape. For
// empty tuples, the shardings array must have one element.
static HloSharding Tuple(const Shape& tuple_shape,
- tensorflow::gtl::ArraySlice<HloSharding> shardings);
+ absl::Span<const HloSharding> shardings);
// Creates a new sharding for a tuple type, with a single input sharding
// repeated on each leaf.
@@ -132,7 +132,7 @@ class HloSharding {
// Returns the device that should execute the given tile.
// It is an error to call this if is_replicated() is true.
// REQUIRES: !IsTuple()
- int64 DeviceForTileIndex(tensorflow::gtl::ArraySlice<int64> index) const;
+ int64 DeviceForTileIndex(absl::Span<const int64> index) const;
// Given a device ID, returns the offset within the specified shape of the
// tile that should be executed on the given core. This returns the lower
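Callbacks such as the one handed to tile_assignment_.Each above now take the multi-dimensional index as absl::Span<const int64>. A sketch of that callback shape in isolation (the int64 alias is assumed, following XLA's convention):

#include <cstdint>
#include <vector>

#include "absl/types/span.h"

using int64 = std::int64_t;  // XLA-style alias, assumed for the sketch

void Example() {
  // The visitor receives a read-only view of the index vector.
  auto visitor = [](absl::Span<const int64> index, int64 value) {
    (void)index;
    (void)value;
  };
  std::vector<int64> index = {1, 0};
  visitor(index, /*value=*/3);  // a vector binds directly to the span
}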
diff --git a/tensorflow/compiler/xla/service/hlo_sharding_metadata.cc b/tensorflow/compiler/xla/service/hlo_sharding_metadata.cc
index 6e9b96488c..34cba6136f 100644
--- a/tensorflow/compiler/xla/service/hlo_sharding_metadata.cc
+++ b/tensorflow/compiler/xla/service/hlo_sharding_metadata.cc
@@ -372,7 +372,7 @@ Status ApplyDomainSharding(const DomainMetadata::Domain& domain,
}
StatusOr<std::shared_ptr<const HloSharding>> ExtractOriginalCommonSharding(
- tensorflow::gtl::ArraySlice<HloInstruction*> instructions) {
+ absl::Span<HloInstruction* const> instructions) {
// If we are here, all the instructions being passed had the same sharding
// (or no sharding), by means of the ShardingMatches() API.
// As such, no kDomain was inserted, and here we are asked to extract the
diff --git a/tensorflow/compiler/xla/service/hlo_sharding_test.cc b/tensorflow/compiler/xla/service/hlo_sharding_test.cc
index 2341f8ada0..80634677e7 100644
--- a/tensorflow/compiler/xla/service/hlo_sharding_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_sharding_test.cc
@@ -29,8 +29,8 @@ limitations under the License.
namespace xla {
namespace {
-Array<int64> MakeArray(tensorflow::gtl::ArraySlice<int64> dimensions,
- tensorflow::gtl::ArraySlice<int64> contents) {
+Array<int64> MakeArray(absl::Span<const int64> dimensions,
+ absl::Span<const int64> contents) {
Array<int64> a(dimensions);
std::copy(contents.begin(), contents.end(), a.begin());
return a;
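MakeArray above also relies on a Span convenience: a span of const elements binds to a braced initializer list for the duration of the call, so tests can write MakeArray({2, 2}, {0, 1, 2, 3}) directly. A sketch of that binding with an assumed int64 alias and a hypothetical Product helper:

#include <cstdint>
#include <functional>
#include <numeric>

#include "absl/types/span.h"

using int64 = std::int64_t;  // XLA-style alias, assumed for the sketch

// Hypothetical helper: multiplies the dimensions of a shape together.
int64 Product(absl::Span<const int64> dimensions) {
  return std::accumulate(dimensions.begin(), dimensions.end(), int64{1},
                         std::multiplies<int64>());
}

void Example() {
  int64 elements = Product({2, 2});  // braced list becomes a temporary span
  (void)elements;                    // elements == 4
}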
diff --git a/tensorflow/compiler/xla/service/hlo_value.cc b/tensorflow/compiler/xla/service/hlo_value.cc
index e0c1326177..773fc7d225 100644
--- a/tensorflow/compiler/xla/service/hlo_value.cc
+++ b/tensorflow/compiler/xla/service/hlo_value.cc
@@ -149,7 +149,7 @@ bool MayUseOperandValue(int64 operand_number, const ShapeIndex& index,
} // namespace
void HloValue::SetPositionsAndComputeUses(
- tensorflow::gtl::ArraySlice<HloPosition> positions) {
+ absl::Span<const HloPosition> positions) {
CHECK_EQ(positions_.size(), 1) << "SetPositions should only be called once.";
// The positions must be unique and should not contain the defining position
@@ -222,8 +222,7 @@ string HloValueSet::ToString() const {
}));
}
-bool HloValueSet::AssignUnionOf(
- tensorflow::gtl::ArraySlice<const HloValueSet*> inputs) {
+bool HloValueSet::AssignUnionOf(absl::Span<const HloValueSet* const> inputs) {
HloValueSet union_set;
for (const HloValueSet* input : inputs) {
for (const HloValue* value : input->values()) {
@@ -254,7 +253,7 @@ std::ostream& operator<<(std::ostream& out, const HloValueSet& value_set) {
}
bool InstructionValueSet::AssignUnionOf(
- tensorflow::gtl::ArraySlice<const InstructionValueSet*> inputs) {
+ absl::Span<const InstructionValueSet* const> inputs) {
CHECK_GT(inputs.size(), 0);
for (int i = 1; i < inputs.size(); ++i) {
DCHECK(ShapeUtil::Compatible(inputs[0]->shape(), inputs[i]->shape()));
diff --git a/tensorflow/compiler/xla/service/hlo_value.h b/tensorflow/compiler/xla/service/hlo_value.h
index a1151f65e0..6f2ad214f6 100644
--- a/tensorflow/compiler/xla/service/hlo_value.h
+++ b/tensorflow/compiler/xla/service/hlo_value.h
@@ -108,8 +108,7 @@ class HloValue : public BufferValue {
// Sets the positions in the module at which the HloValue appears. Updates
// uses. Should be called once and only once. The defining position should not
// be included in 'positions' as this is set at construction time.
- void SetPositionsAndComputeUses(
- tensorflow::gtl::ArraySlice<HloPosition> positions);
+ void SetPositionsAndComputeUses(absl::Span<const HloPosition> positions);
// Returns whether this value is a phi value.
bool is_phi() const { return is_phi_; }
@@ -186,14 +185,14 @@ class HloValueSet {
public:
HloValueSet() = default;
- explicit HloValueSet(tensorflow::gtl::ArraySlice<const HloValue*> values)
+ explicit HloValueSet(absl::Span<const HloValue* const> values)
: values_(values.begin(), values.end()) {
SortAndUniquifyValues();
}
// Sets this value set to the union of the given value sets. Returns whether
// this value set changed.
- bool AssignUnionOf(tensorflow::gtl::ArraySlice<const HloValueSet*> inputs);
+ bool AssignUnionOf(absl::Span<const HloValueSet* const> inputs);
// Return the vector of HloValues in the set. Values in the vector are unique
// and stably sorted by value id.
@@ -247,8 +246,7 @@ class InstructionValueSet : public ShapeTree<HloValueSet> {
// Sets this value set to the union of the given value sets. Returns whether
// this value set changed.
- bool AssignUnionOf(
- tensorflow::gtl::ArraySlice<const InstructionValueSet*> inputs);
+ bool AssignUnionOf(absl::Span<const InstructionValueSet* const> inputs);
string ToString() const;
};
diff --git a/tensorflow/compiler/xla/service/hlo_verifier.cc b/tensorflow/compiler/xla/service/hlo_verifier.cc
index 744cd64bc5..95516dec74 100644
--- a/tensorflow/compiler/xla/service/hlo_verifier.cc
+++ b/tensorflow/compiler/xla/service/hlo_verifier.cc
@@ -699,8 +699,7 @@ Status ShapeVerifier::CheckVariadicShape(const HloInstruction* instruction) {
instruction->opcode(), instruction->operands()));
}
-string ComputationsToString(
- tensorflow::gtl::ArraySlice<HloComputation*> computations) {
+string ComputationsToString(absl::Span<HloComputation* const> computations) {
return absl::StrJoin(computations, ",",
[](string* s, const HloComputation* computation) {
s->append(computation->name());
diff --git a/tensorflow/compiler/xla/service/indexed_array_analysis.cc b/tensorflow/compiler/xla/service/indexed_array_analysis.cc
index 4d4f681c8a..a4de02a890 100644
--- a/tensorflow/compiler/xla/service/indexed_array_analysis.cc
+++ b/tensorflow/compiler/xla/service/indexed_array_analysis.cc
@@ -35,7 +35,6 @@ using ConstantArray = Analysis::ConstantArray;
using ReshapedArray = Analysis::ReshapedArray;
using ScalarIndexedArray = Analysis::ScalarIndexedArray;
using absl::StrJoin;
-using tensorflow::gtl::ArraySlice;
} // namespace
string IndexedArrayAnalysis::ToString(Array* root, bool print_constants) {
@@ -186,7 +185,7 @@ StatusOr<Analysis::Array*> IndexedArrayAnalysis::ComputeArrayForConstant(
StatusOr<ScalarIndexedArray*> IndexedArrayAnalysis::FoldGatherOfGather(
ScalarIndexedArray* source, Array* indices, int64 source_dim,
- tensorflow::gtl::ArraySlice<int64> output_dims, Shape shape) {
+ absl::Span<const int64> output_dims, Shape shape) {
// We want to transform Gather(Gather(A, X), Y) => Gather(A, Gather(X, Y)).
// `source` is the inner Gather(A, X).
@@ -252,8 +251,7 @@ StatusOr<ScalarIndexedArray*> IndexedArrayAnalysis::FoldGatherOfGather(
StatusOr<Analysis::Array*> IndexedArrayAnalysis::ComputeArrayForGather(
const Shape& shape, const GatherDimensionNumbers& dim_numbers,
- tensorflow::gtl::ArraySlice<int64> slice_sizes, Array* source,
- Array* indices) {
+ absl::Span<const int64> slice_sizes, Array* source, Array* indices) {
if (dim_numbers.index_vector_dim() != indices->shape().dimensions_size()) {
VLOG(3) << "ComputeArrayForGather: indices are not scalar";
return nullptr;
@@ -314,7 +312,7 @@ namespace {
// Returns an index into `values` such that the product of the range
// [values.begin()+index, values.end()) is equal to `product`. If there is no
// such index, return -1. All integers in `values` must be positive.
-int64 FindSuffixWithProduct(ArraySlice<int64> values, int64 product) {
+int64 FindSuffixWithProduct(absl::Span<const int64> values, int64 product) {
DCHECK(absl::c_all_of(values, [](int64 value) { return value > 0; }));
int64 current_product = 1;
@@ -343,7 +341,8 @@ struct ReshapePassthroughDimPair {
// The returned vector of pairs is sorted in both the result_dim and the
// operand_dim components.
std::vector<ReshapePassthroughDimPair> ComputeReshapePassthroughDimPairs(
- ArraySlice<int64> operand_shape, ArraySlice<int64> result_shape) {
+ absl::Span<const int64> operand_shape,
+ absl::Span<const int64> result_shape) {
// A reshape can be seen as an index mapping from output index to input index:
//
// (i_0, ..., i_n) = f(o_0, ..., o_m)
@@ -420,7 +419,7 @@ std::vector<ReshapePassthroughDimPair> ComputeReshapePassthroughDimPairs(
// Return true if `dim` is stated as a passthrough operand dim in
// `passthrough_dims`.
bool IsReshapePassthroughOperandDim(
- ArraySlice<ReshapePassthroughDimPair> passthrough_dims, int64 dim) {
+ absl::Span<const ReshapePassthroughDimPair> passthrough_dims, int64 dim) {
return absl::c_any_of(passthrough_dims,
[&](ReshapePassthroughDimPair passthrough_dim_pair) {
return passthrough_dim_pair.operand_dim == dim;
@@ -430,7 +429,8 @@ bool IsReshapePassthroughOperandDim(
// Maps `operand_dim` which must be a passthrough operand dimension to its
// corresponding passthrough result dimension based on `passthrough_dims`.
int64 MapPassthroughOperandDimToResultDim(
- ArraySlice<ReshapePassthroughDimPair> passthrough_dims, int64 operand_dim) {
+ absl::Span<const ReshapePassthroughDimPair> passthrough_dims,
+ int64 operand_dim) {
auto it = absl::c_find_if(
passthrough_dims, [&](ReshapePassthroughDimPair passthrough_dim_pair) {
return passthrough_dim_pair.operand_dim == operand_dim;
@@ -439,9 +439,9 @@ int64 MapPassthroughOperandDimToResultDim(
return it->result_dim;
}
-int64 FindSourcePositionForPassthroughResultDim(ArraySlice<int64> operand_shape,
- ArraySlice<int64> result_shape,
- int64 source_passthrough_dim) {
+int64 FindSourcePositionForPassthroughResultDim(
+ absl::Span<const int64> operand_shape, absl::Span<const int64> result_shape,
+ int64 source_passthrough_dim) {
VLOG(3) << "FindSourcePositionForPassthroughResultDim(["
<< StrJoin(operand_shape, ",") << "], [" << StrJoin(result_shape, ",")
<< "], " << source_passthrough_dim << ")";
@@ -519,8 +519,7 @@ IndexedArrayAnalysis::ReshapeToRemoveDegenerateDims(
}
StatusOr<ScalarIndexedArray*> IndexedArrayAnalysis::ReshapeToAddDegenerateDims(
- ScalarIndexedArray* operand,
- tensorflow::gtl::ArraySlice<int64> degenerate_dims) {
+ ScalarIndexedArray* operand, absl::Span<const int64> degenerate_dims) {
if (degenerate_dims.empty()) {
return operand;
}
@@ -873,7 +872,7 @@ IndexedArrayAnalysis::ComputeArrayForElementwiseBinaryOp(HloOpcode opcode,
return nullptr;
}
- ArraySlice<int64> broadcast_dims = broadcast_instr->dimensions();
+ absl::Span<const int64> broadcast_dims = broadcast_instr->dimensions();
auto is_broadcasted_dim = [&](int64 output_dim) {
return absl::c_find(broadcast_dims, output_dim) == broadcast_dims.end();
};
@@ -896,7 +895,7 @@ IndexedArrayAnalysis::ComputeArrayForElementwiseBinaryOp(HloOpcode opcode,
// The scalar-indexed node "removes" the source dim and "inserts" the output
// dims. We do the opposite here to undo the scalar-indexed operation.
- ArraySlice<int64> output_dims = scalar_indexed_const->output_dims();
+ absl::Span<const int64> output_dims = scalar_indexed_const->output_dims();
for (int64 i = output_dims.size() - 1; i >= 0; --i) {
CHECK(simulated_index[output_dims[i]] == IndexComponent::Broadcasted);
EraseAt(&simulated_index, output_dims[i]);
@@ -973,8 +972,8 @@ namespace {
// Returns the non-contracting non-batch dimension (as per `contracting_dims`
// and `batch_dims`) if there is exactly one, otherwise returns nullopt.
absl::optional<int64> GetOnlyNonContractingNonBatchDim(
- int64 rank, ArraySlice<int64> contracting_dims,
- ArraySlice<int64> batch_dims) {
+ int64 rank, absl::Span<const int64> contracting_dims,
+ absl::Span<const int64> batch_dims) {
absl::optional<int64> result;
for (int64 dim = 0; dim < rank; dim++) {
if (!absl::c_linear_search(contracting_dims, dim) &&
@@ -998,7 +997,8 @@ absl::optional<int64> GetOnlyNonContractingNonBatchDim(
// of whatever operand `indexed_array` is to the dot (LHS or RHS).
bool CanFoldDotIntoIndexedArray(
absl::string_view tag, Analysis::ScalarIndexedConstantArray* indexed_array,
- ArraySlice<int64> contracting_dims, ArraySlice<int64> batch_dims) {
+ absl::Span<const int64> contracting_dims,
+ absl::Span<const int64> batch_dims) {
absl::optional<int64> non_contracting_non_batch_dim =
GetOnlyNonContractingNonBatchDim(ShapeUtil::Rank(indexed_array->shape()),
contracting_dims, batch_dims);
diff --git a/tensorflow/compiler/xla/service/indexed_array_analysis.h b/tensorflow/compiler/xla/service/indexed_array_analysis.h
index 3fa7d749e1..dcfb725535 100644
--- a/tensorflow/compiler/xla/service/indexed_array_analysis.h
+++ b/tensorflow/compiler/xla/service/indexed_array_analysis.h
@@ -188,9 +188,7 @@ class IndexedArrayAnalysis {
// `output_dims` are the dimensions in the output array that are being used
// to compute an index into the `indices` array. See the class
// documentation and the overview for more details.
- tensorflow::gtl::ArraySlice<int64> output_dims() const {
- return output_dims_;
- }
+ absl::Span<const int64> output_dims() const { return output_dims_; }
private:
explicit ScalarIndexedArray(Array* source, Array* indices, int64 source_dim,
@@ -265,8 +263,7 @@ class IndexedArrayAnalysis {
StatusOr<Array*> ComputeArrayForGather(
const Shape& shape, const GatherDimensionNumbers& dim_numbers,
- tensorflow::gtl::ArraySlice<int64> slice_sizes, Array* source,
- Array* indices);
+ absl::Span<const int64> slice_sizes, Array* source, Array* indices);
StatusOr<Array*> ComputeArrayForDotWithIndexedLhs(
const Shape& shape, const DotDimensionNumbers& dim_numbers,
@@ -303,7 +300,7 @@ class IndexedArrayAnalysis {
// G1 = [Arr[i] for i in I2]
StatusOr<ScalarIndexedArray*> FoldGatherOfGather(
ScalarIndexedArray* source, Array* indices, int64 source_dim,
- tensorflow::gtl::ArraySlice<int64> output_dims, Shape shape);
+ absl::Span<const int64> output_dims, Shape shape);
// Reshapes a scalar-indexed node to remove the degenerate dimensions in its
// output. The result is always a scalar-indexed node.
@@ -313,8 +310,7 @@ class IndexedArrayAnalysis {
// Reshapes a scalar-indexed node such that the result has the degenerate
// dimensions `degenerate_dims`. The result is always a scalar-indexed node.
StatusOr<ScalarIndexedArray*> ReshapeToAddDegenerateDims(
- ScalarIndexedArray* operand,
- tensorflow::gtl::ArraySlice<int64> degenerate_dims);
+ ScalarIndexedArray* operand, absl::Span<const int64> degenerate_dims);
StatusOr<ScalarIndexedArray*> FoldReshapeOfGather(
const Shape& shape, ScalarIndexedConstantArray* operand);
diff --git a/tensorflow/compiler/xla/service/instruction_fusion.cc b/tensorflow/compiler/xla/service/instruction_fusion.cc
index 4b5285031b..8c907eae0c 100644
--- a/tensorflow/compiler/xla/service/instruction_fusion.cc
+++ b/tensorflow/compiler/xla/service/instruction_fusion.cc
@@ -219,7 +219,7 @@ bool InstructionFusion::CanFuseOnAllPaths(
InstructionFusion::HloInstructionSet
InstructionFusion::ComputeGloballyUnfusible(
- tensorflow::gtl::ArraySlice<HloInstruction*> post_order) {
+ absl::Span<HloInstruction* const> post_order) {
// Forbid fusion of producers that:
// a) Need to be duplicated, unless they can be fused into all consumers
// via all paths.
diff --git a/tensorflow/compiler/xla/service/instruction_fusion.h b/tensorflow/compiler/xla/service/instruction_fusion.h
index 9802d4cfc1..00b658959a 100644
--- a/tensorflow/compiler/xla/service/instruction_fusion.h
+++ b/tensorflow/compiler/xla/service/instruction_fusion.h
@@ -123,7 +123,7 @@ class InstructionFusion : public HloPassInterface {
// Computes the set of nodes that we do not want to fuse into any of their
// consumers based on a global analysis of the HLO graph.
HloInstructionSet ComputeGloballyUnfusible(
- tensorflow::gtl::ArraySlice<HloInstruction*> post_order);
+ absl::Span<HloInstruction* const> post_order);
// Used to determine if an HLO is expensive. Expensive operations will not be
// duplicated.
diff --git a/tensorflow/compiler/xla/service/interpreter/executable.cc b/tensorflow/compiler/xla/service/interpreter/executable.cc
index 2259dc1083..5dea124768 100644
--- a/tensorflow/compiler/xla/service/interpreter/executable.cc
+++ b/tensorflow/compiler/xla/service/interpreter/executable.cc
@@ -47,7 +47,7 @@ InterpreterExecutable::~InterpreterExecutable() {}
StatusOr<ScopedShapedBuffer> InterpreterExecutable::ExecuteOnStream(
const ServiceExecutableRunOptions* run_options,
- tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments,
+ absl::Span<const ShapedBuffer* const> arguments,
HloExecutionProfile* hlo_execution_profile) {
se::Stream* stream = run_options->stream();
se::StreamExecutor* executor = stream->parent();
@@ -111,7 +111,7 @@ StatusOr<ScopedShapedBuffer> InterpreterExecutable::ExecuteOnStream(
StatusOr<ScopedShapedBuffer> InterpreterExecutable::ExecuteAsyncOnStream(
const ServiceExecutableRunOptions* run_options,
- tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments) {
+ absl::Span<const ShapedBuffer* const> arguments) {
return tensorflow::errors::Unimplemented(
"ExecuteAsyncOnStream is not yet supported on Interpreter.");
}
diff --git a/tensorflow/compiler/xla/service/interpreter/executable.h b/tensorflow/compiler/xla/service/interpreter/executable.h
index 91d8148d26..588787d445 100644
--- a/tensorflow/compiler/xla/service/interpreter/executable.h
+++ b/tensorflow/compiler/xla/service/interpreter/executable.h
@@ -48,13 +48,13 @@ class InterpreterExecutable : public Executable {
StatusOr<ScopedShapedBuffer> ExecuteOnStream(
const ServiceExecutableRunOptions* run_options,
- tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments,
+ absl::Span<const ShapedBuffer* const> arguments,
HloExecutionProfile* hlo_execution_profile) override
LOCKS_EXCLUDED(evaluator_lock_);
StatusOr<ScopedShapedBuffer> ExecuteAsyncOnStream(
const ServiceExecutableRunOptions* run_options,
- tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments) override;
+ absl::Span<const ShapedBuffer* const> arguments) override;
static int64 ShapeSizeBytes(const Shape& shape);
diff --git a/tensorflow/compiler/xla/service/interpreter/executor.h b/tensorflow/compiler/xla/service/interpreter/executor.h
index db6b910b32..f600b14c6c 100644
--- a/tensorflow/compiler/xla/service/interpreter/executor.h
+++ b/tensorflow/compiler/xla/service/interpreter/executor.h
@@ -47,7 +47,7 @@ limitations under the License.
namespace stream_executor {
namespace interpreter {
-using Args = tensorflow::gtl::ArraySlice<DeviceMemoryBase>;
+using Args = absl::Span<const DeviceMemoryBase>;
class XlaInterpreterExecutor : public internal::StreamExecutorInterface {
public:
diff --git a/tensorflow/compiler/xla/service/llvm_ir/dynamic_update_slice_util.cc b/tensorflow/compiler/xla/service/llvm_ir/dynamic_update_slice_util.cc
index ad350613dd..cc2e862f2e 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/dynamic_update_slice_util.cc
+++ b/tensorflow/compiler/xla/service/llvm_ir/dynamic_update_slice_util.cc
@@ -99,9 +99,10 @@ static Status EmitDynamicUpdateSliceInPlaceImpl(
return LoopEmitter(loop_body_emitter, update_shape, b).EmitLoop(name);
}
-Status EmitDynamicUpdateSliceInPlace(
- tensorflow::gtl::ArraySlice<IrArray> operand_arrays,
- const IrArray& output_array, absl::string_view name, llvm::IRBuilder<>* b) {
+Status EmitDynamicUpdateSliceInPlace(absl::Span<const IrArray> operand_arrays,
+ const IrArray& output_array,
+ absl::string_view name,
+ llvm::IRBuilder<>* b) {
VLOG(2) << "EmitDynamicUpdateSliceInPlace for " << name;
// No need to use operand_arrays[0], the input array of the
@@ -129,8 +130,7 @@ Status EmitDynamicUpdateSliceInPlace(
//
// Emits a sequential loop if launch_dimensions is null.
static Status EmitFusedDynamicUpdateSliceInPlaceImpl(
- HloInstruction* fusion,
- tensorflow::gtl::ArraySlice<IrArray> fusion_operand_arrays,
+ HloInstruction* fusion, absl::Span<const IrArray> fusion_operand_arrays,
const IrArray& fusion_output_array, ElementalIrEmitter* elemental_emitter,
const gpu::LaunchDimensions* launch_dimensions, llvm::IRBuilder<>* b) {
CHECK_EQ(fusion->opcode(), HloOpcode::kFusion);
@@ -173,8 +173,7 @@ static Status EmitFusedDynamicUpdateSliceInPlaceImpl(
}
Status EmitFusedDynamicUpdateSliceInPlace(
- HloInstruction* fusion,
- tensorflow::gtl::ArraySlice<IrArray> fusion_operand_arrays,
+ HloInstruction* fusion, absl::Span<const IrArray> fusion_operand_arrays,
const IrArray& fusion_output_array, ElementalIrEmitter* elemental_emitter,
llvm::IRBuilder<>* b) {
return EmitFusedDynamicUpdateSliceInPlaceImpl(
@@ -183,8 +182,7 @@ Status EmitFusedDynamicUpdateSliceInPlace(
}
Status EmitParallelFusedDynamicUpdateSliceInPlace(
- HloInstruction* fusion,
- tensorflow::gtl::ArraySlice<IrArray> fusion_operand_arrays,
+ HloInstruction* fusion, absl::Span<const IrArray> fusion_operand_arrays,
const IrArray& fusion_output_array, ElementalIrEmitter* elemental_emitter,
const gpu::LaunchDimensions& launch_dimensions, llvm::IRBuilder<>* b) {
return EmitFusedDynamicUpdateSliceInPlaceImpl(
diff --git a/tensorflow/compiler/xla/service/llvm_ir/dynamic_update_slice_util.h b/tensorflow/compiler/xla/service/llvm_ir/dynamic_update_slice_util.h
index e1631a62ae..fb3e4eb97c 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/dynamic_update_slice_util.h
+++ b/tensorflow/compiler/xla/service/llvm_ir/dynamic_update_slice_util.h
@@ -63,25 +63,24 @@ inline bool CanEmitFusedDynamicUpdateSliceInPlace(
// Emits IR for running the given dynamic-update-slice op in-place -- that is,
// where the input and output buffers share the same slice, so we can simply
// modify the input/output buffer without touching any of the other elements.
-Status EmitDynamicUpdateSliceInPlace(
- tensorflow::gtl::ArraySlice<IrArray> operand_arrays,
- const IrArray& output_array, absl::string_view name, llvm::IRBuilder<>* b);
+Status EmitDynamicUpdateSliceInPlace(absl::Span<const IrArray> operand_arrays,
+ const IrArray& output_array,
+ absl::string_view name,
+ llvm::IRBuilder<>* b);
// Given a loop-fusion node whose root is a dynamic-update-slice op whose
// array-to-be-updated and output share the same buffer slice, emits
// (sequential) code for a fusion node that does the dynamic-update-slice in
// place.
Status EmitFusedDynamicUpdateSliceInPlace(
- HloInstruction* fusion,
- tensorflow::gtl::ArraySlice<IrArray> fusion_operand_arrays,
+ HloInstruction* fusion, absl::Span<const IrArray> fusion_operand_arrays,
const IrArray& fusion_output_array, ElementalIrEmitter* elemental_emitter,
llvm::IRBuilder<>* b);
// Same as EmitFusedDynamicUpdateSliceInPlace, except emits a parallel loop with
// the given launch dimensions.
Status EmitParallelFusedDynamicUpdateSliceInPlace(
- HloInstruction* fusion,
- tensorflow::gtl::ArraySlice<IrArray> fusion_operand_arrays,
+ HloInstruction* fusion, absl::Span<const IrArray> fusion_operand_arrays,
const IrArray& fusion_output_array, ElementalIrEmitter* elemental_emitter,
const gpu::LaunchDimensions& launch_dimensions, llvm::IRBuilder<>* b);
diff --git a/tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.cc b/tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.cc
index 6d637cad6d..b606c993a2 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.cc
+++ b/tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.cc
@@ -147,7 +147,7 @@ Status FusedIrEmitter::HandleParameter(HloInstruction* parameter) {
}
Status FusedIrEmitter::HandleTuple(HloInstruction* tuple) {
- tensorflow::gtl::ArraySlice<HloInstruction*> operands(tuple->operands());
+ absl::Span<HloInstruction* const> operands(tuple->operands());
std::vector<llvm::Type*> operand_elemental_ir_types;
for (HloInstruction* operand : operands) {
operand_elemental_ir_types.push_back(llvm_ir::PrimitiveTypeToIrType(
diff --git a/tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.h b/tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.h
index 30471480c4..25ec458160 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.h
+++ b/tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.h
@@ -54,7 +54,7 @@ class FusedIrEmitter : public DfsHloVisitorWithDefault {
public:
using Generator = llvm_ir::ElementGenerator;
- FusedIrEmitter(tensorflow::gtl::ArraySlice<llvm_ir::IrArray> parameter_arrays,
+ FusedIrEmitter(absl::Span<const llvm_ir::IrArray> parameter_arrays,
ElementalIrEmitter* elemental_emitter)
: parameter_arrays_(parameter_arrays),
tiled_parameter_info_(nullptr),
@@ -94,7 +94,7 @@ class FusedIrEmitter : public DfsHloVisitorWithDefault {
private:
// Arrays of parameters of fusion instruction
- tensorflow::gtl::ArraySlice<llvm_ir::IrArray> parameter_arrays_;
+ absl::Span<const llvm_ir::IrArray> parameter_arrays_;
const llvm_ir::TiledParameterInfo* tiled_parameter_info_;
ElementalIrEmitter* elemental_emitter_;
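FusedIrEmitter above keeps the span itself as a member (parameter_arrays_), so the emitter never owns the parameter arrays; whoever constructs it must keep the underlying container alive for the emitter's lifetime. A sketch of that pattern with a placeholder IrArray:

#include <cstddef>
#include <vector>

#include "absl/types/span.h"

struct IrArray {};  // stand-in for llvm_ir::IrArray; sketch only

class EmitterSketch {
 public:
  // Stores only a view; the caller retains ownership of the arrays.
  explicit EmitterSketch(absl::Span<const IrArray> parameter_arrays)
      : parameter_arrays_(parameter_arrays) {}

  std::size_t num_parameters() const { return parameter_arrays_.size(); }

 private:
  absl::Span<const IrArray> parameter_arrays_;
};

void Example() {
  std::vector<IrArray> arrays(3);
  EmitterSketch emitter(arrays);  // 'arrays' must outlive 'emitter'
  (void)emitter;
}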
diff --git a/tensorflow/compiler/xla/service/llvm_ir/ir_array.cc b/tensorflow/compiler/xla/service/llvm_ir/ir_array.cc
index 36e713d1ac..67f7423121 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/ir_array.cc
+++ b/tensorflow/compiler/xla/service/llvm_ir/ir_array.cc
@@ -73,7 +73,7 @@ IrArray::Index::Index(llvm::Value* linear, const Shape& shape,
Delinearize(&multidim_, linear, shape, b);
}
-IrArray::Index::Index(tensorflow::gtl::ArraySlice<llvm::Value*> multidim,
+IrArray::Index::Index(absl::Span<llvm::Value* const> multidim,
llvm::Value* linear, const Shape& shape)
: multidim_(multidim.begin(), multidim.end()),
linear_(linear),
@@ -92,7 +92,7 @@ IrArray::Index::Index(tensorflow::gtl::ArraySlice<llvm::Value*> multidim,
<< " should have a layout.";
}
-IrArray::Index::Index(tensorflow::gtl::ArraySlice<llvm::Value*> multidim,
+IrArray::Index::Index(absl::Span<llvm::Value* const> multidim,
const Shape& shape, llvm::IRBuilder<>* b)
: multidim_(multidim.begin(), multidim.end()),
layout_(shape.layout()),
@@ -147,7 +147,7 @@ IrArray::Index IrArray::Index::SourceIndexOfReshape(
// indices in the same common factor.
for (ssize_t k = common_factors.size() - 2; k >= 0; --k) {
llvm::Value* logical_linear_index =
- Index(tensorflow::gtl::ArraySlice<llvm::Value*>(multidim_).subspan(
+ Index(absl::Span<llvm::Value* const>(multidim_).subspan(
common_factors[k].second,
common_factors[k + 1].second - common_factors[k].second),
index_type_)
@@ -184,9 +184,8 @@ IrArray::Index IrArray::Index::SourceIndexOfReshape(
}
IrArray::Index IrArray::Index::SourceIndexOfSlice(
- const Shape& shape, tensorflow::gtl::ArraySlice<int64> starts,
- tensorflow::gtl::ArraySlice<int64> strides,
- llvm::IRBuilder<>* builder) const {
+ const Shape& shape, absl::Span<const int64> starts,
+ absl::Span<const int64> strides, llvm::IRBuilder<>* builder) const {
Index source_index(index_type_, multidim_.size());
for (int i = 0; i < multidim_.size(); ++i) {
int64 stride = strides[i];
@@ -207,7 +206,7 @@ IrArray::Index IrArray::Index::SourceIndexOfSlice(
IrArray::Index IrArray::Index::SourceIndexOfTranspose(
const Shape& shape, const Shape& operand_shape,
- tensorflow::gtl::ArraySlice<int64> dimension_mapping,
+ absl::Span<const int64> dimension_mapping,
llvm::IRBuilder<>* builder) const {
std::vector<llvm::Value*> operand_multidim_index =
Permute(dimension_mapping, multidim());
@@ -256,7 +255,7 @@ IrArray::Index IrArray::Index::SourceIndexOfBitcast(
IrArray::Index IrArray::Index::SourceIndexOfBroadcast(
const Shape& shape, const Shape& operand_shape,
- tensorflow::gtl::ArraySlice<int64> dimension_mapping,
+ absl::Span<const int64> dimension_mapping,
llvm::IRBuilder<>* builder) const {
int64 rank = ShapeUtil::Rank(operand_shape);
std::vector<llvm::Value*> source_index(rank);
@@ -321,9 +320,8 @@ IrArray::Index IrArray::Index::SourceIndexOfBroadcast(
return Index(source_index, linear, operand_shape);
}
-llvm::Value* IrArray::Index::Linearize(
- tensorflow::gtl::ArraySlice<int64> dimensions,
- llvm::IRBuilder<>* builder) const {
+llvm::Value* IrArray::Index::Linearize(absl::Span<const int64> dimensions,
+ llvm::IRBuilder<>* builder) const {
// Each dimension is multiplied by the product of the sizes of all
// earlier dimensions and added to the accumulator logical_linear_index.
CHECK_EQ(size(), dimensions.size());
diff --git a/tensorflow/compiler/xla/service/llvm_ir/ir_array.h b/tensorflow/compiler/xla/service/llvm_ir/ir_array.h
index e913c109b3..7629806a36 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/ir_array.h
+++ b/tensorflow/compiler/xla/service/llvm_ir/ir_array.h
@@ -70,7 +70,7 @@ class IrArray {
// Constructs an index from multi-dimensional index "multidim". The linear
// index is set to nullptr.
- explicit Index(tensorflow::gtl::ArraySlice<llvm::Value*> multidim,
+ explicit Index(absl::Span<llvm::Value* const> multidim,
llvm::Type* index_ty = nullptr)
: multidim_(multidim.begin(), multidim.end()) {
if (size() == 0) {
@@ -99,14 +99,14 @@ class IrArray {
// that it indexes into.
//
// Precondition: "shape" has a layout.
- Index(tensorflow::gtl::ArraySlice<llvm::Value*> multidim,
- const Shape& shape, llvm::IRBuilder<>* b);
+ Index(absl::Span<llvm::Value* const> multidim, const Shape& shape,
+ llvm::IRBuilder<>* b);
// Constructs an index from both a multi-dimensional index and a linear
// index. "shape" has the same meaning as that in the constructor that takes
// only a linear index.
- Index(tensorflow::gtl::ArraySlice<llvm::Value*> multidim,
- llvm::Value* linear, const Shape& shape);
+ Index(absl::Span<llvm::Value* const> multidim, llvm::Value* linear,
+ const Shape& shape);
const std::vector<llvm::Value*>& multidim() const { return multidim_; }
llvm::Value* linear() const { return linear_; }
@@ -145,17 +145,15 @@ class IrArray {
// by starting indices `starts` and stride values `strides`.
//
// Precondition: "this" is an index into a slice whose shape is `shape`.
- Index SourceIndexOfSlice(const Shape& shape,
- tensorflow::gtl::ArraySlice<int64> starts,
- tensorflow::gtl::ArraySlice<int64> strides,
+ Index SourceIndexOfSlice(const Shape& shape, absl::Span<const int64> starts,
+ absl::Span<const int64> strides,
llvm::IRBuilder<>* builder) const;
// Given that "this" is the target index of a transpose from `operand_shape`
// to `shape` with the given dimension mapping, returns the source index.
- Index SourceIndexOfTranspose(
- const Shape& shape, const Shape& operand_shape,
- tensorflow::gtl::ArraySlice<int64> dimension_mapping,
- llvm::IRBuilder<>* builder) const;
+ Index SourceIndexOfTranspose(const Shape& shape, const Shape& operand_shape,
+ absl::Span<const int64> dimension_mapping,
+ llvm::IRBuilder<>* builder) const;
// Given that "this" is the target index of a bitcast from `operand_shape`
// to `shape`, returns the source index.
@@ -164,14 +162,13 @@ class IrArray {
// Given that "this" is the target index of a broadcast from `operand_shape`
// to `shape` with the given dimension mapping, returns the source index.
- Index SourceIndexOfBroadcast(
- const Shape& shape, const Shape& operand_shape,
- tensorflow::gtl::ArraySlice<int64> dimension_mapping,
- llvm::IRBuilder<>* builder) const;
+ Index SourceIndexOfBroadcast(const Shape& shape, const Shape& operand_shape,
+ absl::Span<const int64> dimension_mapping,
+ llvm::IRBuilder<>* builder) const;
// Linearizes the index into the given shape, i.e. reshapes it to rank-1 and
// returns the index into the sole dimension 0 of the new shape.
- llvm::Value* Linearize(tensorflow::gtl::ArraySlice<int64> dimensions,
+ llvm::Value* Linearize(absl::Span<const int64> dimensions,
llvm::IRBuilder<>* builder) const;
llvm::Type* GetType() const { return index_type_; }
diff --git a/tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.h b/tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.h
index b152cf9275..43fec311f1 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.h
+++ b/tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.h
@@ -235,7 +235,7 @@ class KernelSupportLibrary {
}));
}
- using ArgumentVector = tensorflow::gtl::ArraySlice<llvm::Value*>;
+ using ArgumentVector = absl::Span<llvm::Value* const>;
// Generates the following control flow structure:
//
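ArgumentVector above is just a type alias for a span of llvm::Value pointers; the alias carries no storage, so anything built from it is still only a view into caller-owned memory. A minimal sketch of using such an alias (Use is a hypothetical consumer):

#include <vector>

#include "absl/types/span.h"

namespace llvm {
class Value;  // forward declaration; sketch only
}  // namespace llvm

using ArgumentVector = absl::Span<llvm::Value* const>;

void Use(ArgumentVector args) { (void)args; }  // hypothetical consumer

void Example(const std::vector<llvm::Value*>& values) {
  Use(values);  // the vector converts to the aliased span type
}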
diff --git a/tensorflow/compiler/xla/service/llvm_ir/kernel_tiling.cc b/tensorflow/compiler/xla/service/llvm_ir/kernel_tiling.cc
index cb4d1db997..e5fbdbd51b 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/kernel_tiling.cc
+++ b/tensorflow/compiler/xla/service/llvm_ir/kernel_tiling.cc
@@ -28,7 +28,7 @@ namespace {
// Returns the indices of the first elements of all consecutive subarrays of the
// given array. For example:
// ConsecutiveSegments({m, m+1, m+2, n, k, k+1}) = {0, 3, 4}
-std::vector<size_t> ConsecutiveSegments(tensorflow::gtl::ArraySlice<int64> xs) {
+std::vector<size_t> ConsecutiveSegments(absl::Span<const int64> xs) {
std::vector<size_t> is = {0};
for (size_t i = 1; i < xs.size(); ++i) {
if (1 != xs[i] - xs[i - 1]) {
@@ -40,8 +40,7 @@ std::vector<size_t> ConsecutiveSegments(tensorflow::gtl::ArraySlice<int64> xs) {
// Merges the sequences of dimensions of the given shape which start at the
// given indices `segs`.
-Shape MergeDimensions(tensorflow::gtl::ArraySlice<size_t> segs,
- const Shape& shape) {
+Shape MergeDimensions(absl::Span<const size_t> segs, const Shape& shape) {
std::vector<int64> dimensions;
for (size_t i = 1; i <= segs.size(); ++i) {
dimensions.push_back(std::accumulate(
diff --git a/tensorflow/compiler/xla/service/llvm_ir/kernel_tiling.h b/tensorflow/compiler/xla/service/llvm_ir/kernel_tiling.h
index 8bd06c42c3..5ea05b3188 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/kernel_tiling.h
+++ b/tensorflow/compiler/xla/service/llvm_ir/kernel_tiling.h
@@ -50,7 +50,7 @@ IrArray::Index GetUnreducedOutputIndex(
// for 021 transpose.
class TiledParameterInfo {
public:
- TiledParameterInfo(tensorflow::gtl::ArraySlice<llvm::Value*> param_buffers,
+ TiledParameterInfo(absl::Span<llvm::Value* const> param_buffers,
llvm::Value* y, llvm::Value* x)
: param_buffers_(param_buffers), y_(y), x_(x) {}
@@ -67,7 +67,7 @@ class TiledParameterInfo {
private:
// Param_buffers_[i] stores the tile buffer for the ith parameter or nullptr
// if the parameter is not tiled.
- tensorflow::gtl::ArraySlice<llvm::Value*> param_buffers_;
+ absl::Span<llvm::Value* const> param_buffers_;
// The y coordinate within a tile.
llvm::Value* y_;
// The x coordinate within a tile.
diff --git a/tensorflow/compiler/xla/service/llvm_ir/llvm_loop.cc b/tensorflow/compiler/xla/service/llvm_ir/llvm_loop.cc
index 9f3329e7f0..219a9f221f 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/llvm_loop.cc
+++ b/tensorflow/compiler/xla/service/llvm_ir/llvm_loop.cc
@@ -241,7 +241,7 @@ IrArray::Index ForLoopNest::AddLoopsForShape(const Shape& shape,
}
IrArray::Index ForLoopNest::AddLoopsForShapeOnDimensions(
- const Shape& shape, tensorflow::gtl::ArraySlice<int64> dimensions,
+ const Shape& shape, absl::Span<const int64> dimensions,
absl::string_view suffix) {
llvm_ir::IrArray::Index index(index_type_, shape.dimensions_size());
for (int64 dimension : dimensions) {
diff --git a/tensorflow/compiler/xla/service/llvm_ir/llvm_loop.h b/tensorflow/compiler/xla/service/llvm_ir/llvm_loop.h
index 0a406bd90b..2be7bbd0de 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/llvm_loop.h
+++ b/tensorflow/compiler/xla/service/llvm_ir/llvm_loop.h
@@ -242,7 +242,7 @@ class ForLoopNest {
// size equals the rank of shape and there is a null for each
// dimension that is not in "dimensions".
IrArray::Index AddLoopsForShapeOnDimensions(
- const Shape& shape, tensorflow::gtl::ArraySlice<int64> dimensions,
+ const Shape& shape, absl::Span<const int64> dimensions,
absl::string_view suffix);
// Emits a series of nested loops for iterating over an operand array. Loops
diff --git a/tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc b/tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc
index f0db2a3761..1a53c026be 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc
+++ b/tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc
@@ -83,11 +83,10 @@ string DumpModuleToString(const llvm::Module& module) {
return AsString(buffer_string);
}
-llvm::Value* EmitCallToIntrinsic(
- llvm::Intrinsic::ID intrinsic_id,
- tensorflow::gtl::ArraySlice<llvm::Value*> operands,
- tensorflow::gtl::ArraySlice<llvm::Type*> overloaded_types,
- llvm::IRBuilder<>* b) {
+llvm::Value* EmitCallToIntrinsic(llvm::Intrinsic::ID intrinsic_id,
+ absl::Span<llvm::Value* const> operands,
+ absl::Span<llvm::Type* const> overloaded_types,
+ llvm::IRBuilder<>* b) {
llvm::Module* module = ModuleFromIRBuilder(b);
llvm::Function* intrinsic = llvm::Intrinsic::getDeclaration(
module, intrinsic_id, AsArrayRef(overloaded_types));
diff --git a/tensorflow/compiler/xla/service/llvm_ir/llvm_util.h b/tensorflow/compiler/xla/service/llvm_ir/llvm_util.h
index dde50e19d1..61b029eb08 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/llvm_util.h
+++ b/tensorflow/compiler/xla/service/llvm_ir/llvm_util.h
@@ -59,7 +59,7 @@ llvm::ArrayRef<T> AsArrayRef(const std::vector<T>& vec) {
}
template <typename T>
-llvm::ArrayRef<T> AsArrayRef(const tensorflow::gtl::ArraySlice<T>& slice) {
+llvm::ArrayRef<T> AsArrayRef(const absl::Span<const T>& slice) {
return llvm::ArrayRef<T>(slice.data(), slice.size());
}
@@ -101,11 +101,10 @@ string SanitizeFunctionName(string function_name);
// intrinsics (for example, "minnum") must include a type in overloaded_types
// for each overloaded type. Typically, overloaded intrinsics have only a single
// overloaded type.
-llvm::Value* EmitCallToIntrinsic(
- llvm::Intrinsic::ID intrinsic_id,
- tensorflow::gtl::ArraySlice<llvm::Value*> operands,
- tensorflow::gtl::ArraySlice<llvm::Type*> overloaded_types,
- llvm::IRBuilder<>* b);
+llvm::Value* EmitCallToIntrinsic(llvm::Intrinsic::ID intrinsic_id,
+ absl::Span<llvm::Value* const> operands,
+ absl::Span<llvm::Type* const> overloaded_types,
+ llvm::IRBuilder<>* b);
// Emit float max. Emit maxnum intrinsic if fast math is disabled, or
// fcmp+select otherwise
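The AsArrayRef overload above is the bridge into LLVM APIs: both absl::Span and llvm::ArrayRef are pointer-plus-length views, so the conversion copies no elements. A sketch of the same bridge, assuming the LLVM headers are on the include path:

#include <vector>

#include "absl/types/span.h"
#include "llvm/ADT/ArrayRef.h"

template <typename T>
llvm::ArrayRef<T> AsArrayRefSketch(absl::Span<const T> slice) {
  // ArrayRef has a (pointer, length) constructor, so this is O(1).
  return llvm::ArrayRef<T>(slice.data(), slice.size());
}

void Example(const std::vector<int>& xs) {
  llvm::ArrayRef<int> ref = AsArrayRefSketch<int>(absl::MakeConstSpan(xs));
  (void)ref;
}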
diff --git a/tensorflow/compiler/xla/service/llvm_ir/loop_emitter.cc b/tensorflow/compiler/xla/service/llvm_ir/loop_emitter.cc
index 1553b4fc91..0dc120e0b0 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/loop_emitter.cc
+++ b/tensorflow/compiler/xla/service/llvm_ir/loop_emitter.cc
@@ -69,7 +69,7 @@ static LoopEmitter::BodyEmitter MakeBodyEmitterForMultiOutputFusion(
}
LoopEmitter::LoopEmitter(const ElementGenerator& target_element_generator,
- tensorflow::gtl::ArraySlice<IrArray> target_arrays,
+ absl::Span<const IrArray> target_arrays,
llvm::IRBuilder<>* b)
: body_emitter_(MakeBodyEmitterForMultiOutputFusion(
target_element_generator,
diff --git a/tensorflow/compiler/xla/service/llvm_ir/loop_emitter.h b/tensorflow/compiler/xla/service/llvm_ir/loop_emitter.h
index 57d9d8bbc6..a537c00066 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/loop_emitter.h
+++ b/tensorflow/compiler/xla/service/llvm_ir/loop_emitter.h
@@ -53,8 +53,7 @@ class LoopEmitter {
// This is used for multi-output fusion. target_element_generator must
// produce an LLVM struct with N elements.
LoopEmitter(const ElementGenerator& target_element_generator,
- tensorflow::gtl::ArraySlice<IrArray> target_arrays,
- llvm::IRBuilder<>* b);
+ absl::Span<const IrArray> target_arrays, llvm::IRBuilder<>* b);
LoopEmitter(const LoopEmitter&) = delete;
LoopEmitter& operator=(const LoopEmitter&) = delete;
diff --git a/tensorflow/compiler/xla/service/llvm_ir/tuple_ops.cc b/tensorflow/compiler/xla/service/llvm_ir/tuple_ops.cc
index 11ed6ee59f..7d49b8d6c2 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/tuple_ops.cc
+++ b/tensorflow/compiler/xla/service/llvm_ir/tuple_ops.cc
@@ -64,8 +64,7 @@ void EmitTupleSelect(const IrArray& select, const IrArray& pred,
}
}
-void EmitTuple(const IrArray& tuple,
- tensorflow::gtl::ArraySlice<llvm::Value*> operands,
+void EmitTuple(const IrArray& tuple, absl::Span<llvm::Value* const> operands,
llvm::IRBuilder<>* b, llvm::Module* module) {
for (size_t i = 0; i < operands.size(); ++i) {
auto* store = b->CreateStore(
diff --git a/tensorflow/compiler/xla/service/llvm_ir/tuple_ops.h b/tensorflow/compiler/xla/service/llvm_ir/tuple_ops.h
index cf6bf5d0b1..cee211d66f 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/tuple_ops.h
+++ b/tensorflow/compiler/xla/service/llvm_ir/tuple_ops.h
@@ -65,8 +65,7 @@ void EmitTupleSelect(const IrArray& select, const IrArray& pred,
// A tuple is an array of pointers, one for each operand. Each pointer points to
// the output buffer of its corresponding operand.
-void EmitTuple(const IrArray& tuple,
- tensorflow::gtl::ArraySlice<llvm::Value*> operands,
+void EmitTuple(const IrArray& tuple, absl::Span<llvm::Value* const> operands,
llvm::IRBuilder<>* b, llvm::Module* module);
// A tuple is an array of pointers, one for each operand. Each pointer points to
diff --git a/tensorflow/compiler/xla/service/local_service.cc b/tensorflow/compiler/xla/service/local_service.cc
index 768105d9e1..0d0fb7946a 100644
--- a/tensorflow/compiler/xla/service/local_service.cc
+++ b/tensorflow/compiler/xla/service/local_service.cc
@@ -141,7 +141,7 @@ ExecutionOptions CreateExecutionOptions(
StatusOr<std::unique_ptr<Executable>> LocalService::CompileExecutable(
const XlaComputation& computation,
- const tensorflow::gtl::ArraySlice<const Shape*> argument_layouts,
+ const absl::Span<const Shape* const> argument_layouts,
const ExecutableBuildOptions& build_options) {
const HloModuleProto& proto = computation.proto();
TF_RET_CHECK(proto.has_program_shape());
diff --git a/tensorflow/compiler/xla/service/local_service.h b/tensorflow/compiler/xla/service/local_service.h
index 8f707ea904..acc8c6d2e0 100644
--- a/tensorflow/compiler/xla/service/local_service.h
+++ b/tensorflow/compiler/xla/service/local_service.h
@@ -48,7 +48,7 @@ class LocalService : public Service {
// compiler is responsible for freeing any memory it allocates this way.
StatusOr<std::unique_ptr<Executable>> CompileExecutable(
const XlaComputation& computation,
- const tensorflow::gtl::ArraySlice<const Shape*> argument_layouts,
+ const absl::Span<const Shape* const> argument_layouts,
const ExecutableBuildOptions& build_options);
// Returns the device ordinal that corresponds to the given replica number.
diff --git a/tensorflow/compiler/xla/service/multi_output_fusion.cc b/tensorflow/compiler/xla/service/multi_output_fusion.cc
index 4166ef5baf..b9ec31c497 100644
--- a/tensorflow/compiler/xla/service/multi_output_fusion.cc
+++ b/tensorflow/compiler/xla/service/multi_output_fusion.cc
@@ -262,7 +262,7 @@ void MultiOutputFusion::RecomputeReachability() {
void MultiOutputFusion::UpdateReachability(
HloInstruction* instr1, HloInstruction* instr2,
- tensorflow::gtl::ArraySlice<HloInstruction*> instrs_to_update,
+ absl::Span<HloInstruction* const> instrs_to_update,
const std::function<bool(HloInstruction*)>& skip) {
for (auto instr : instrs_to_update) {
if (skip != nullptr && skip(instr)) {
diff --git a/tensorflow/compiler/xla/service/multi_output_fusion.h b/tensorflow/compiler/xla/service/multi_output_fusion.h
index 4c8cb7d379..d2c52651c4 100644
--- a/tensorflow/compiler/xla/service/multi_output_fusion.h
+++ b/tensorflow/compiler/xla/service/multi_output_fusion.h
@@ -92,7 +92,7 @@ class MultiOutputFusion : public HloPassInterface {
// Update the reachability map after fusing instr1 and instr2.
void UpdateReachability(
HloInstruction* instr1, HloInstruction* instr2,
- tensorflow::gtl::ArraySlice<HloInstruction*> instrs_to_update,
+ absl::Span<HloInstruction* const> instrs_to_update,
const std::function<bool(HloInstruction*)>& skip = nullptr);
// Hook for multi-output fusion along producer-consumer edges.
diff --git a/tensorflow/compiler/xla/service/scatter_expander.cc b/tensorflow/compiler/xla/service/scatter_expander.cc
index 2077b57c05..2f4b2667c4 100644
--- a/tensorflow/compiler/xla/service/scatter_expander.cc
+++ b/tensorflow/compiler/xla/service/scatter_expander.cc
@@ -26,7 +26,6 @@ limitations under the License.
namespace xla {
-using tensorflow::gtl::ArraySlice;
// Transposes the given scatter_indices such that the index_vector_dim becomes
// the most-minor dimension.
@@ -87,7 +86,7 @@ static StatusOr<HloInstruction*> CanonicalizeScatterIndices(
// major dimensions and all the window dimensions appear in the minor
// dimensions.
static StatusOr<HloInstruction*> PermuteScatterAndWindowDims(
- HloInstruction* updates, ArraySlice<int64> update_window_dims) {
+ HloInstruction* updates, absl::Span<const int64> update_window_dims) {
std::vector<int64> permutation;
const int64 updates_rank = ShapeUtil::Rank(updates->shape());
permutation.reserve(updates_rank);
diff --git a/tensorflow/compiler/xla/service/service.cc b/tensorflow/compiler/xla/service/service.cc
index e10c1d9927..f0e2566a3f 100644
--- a/tensorflow/compiler/xla/service/service.cc
+++ b/tensorflow/compiler/xla/service/service.cc
@@ -62,10 +62,9 @@ using absl::StrCat;
using absl::StrFormat;
// Records the arguments used to invoke a computation in an HloSnapshot proto.
-Status RecordArguments(
- const tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments,
- se::Stream* stream, TransferManager* transfer_manager,
- HloSnapshot* module) {
+Status RecordArguments(const absl::Span<const ShapedBuffer* const> arguments,
+ se::Stream* stream, TransferManager* transfer_manager,
+ HloSnapshot* module) {
module->clear_arguments();
for (const ShapedBuffer* argument : arguments) {
TF_ASSIGN_OR_RETURN(
@@ -207,8 +206,8 @@ Status Service::ValidateResultShape(const Shape& client_shape,
StatusOr<std::vector<std::vector<const ShapedBuffer*>>>
Service::ResolveAndValidateArguments(
- tensorflow::gtl::ArraySlice<const GlobalDataHandle*> arguments,
- tensorflow::gtl::ArraySlice<se::StreamExecutor*> stream_executors) {
+ absl::Span<const GlobalDataHandle* const> arguments,
+ absl::Span<se::StreamExecutor* const> stream_executors) {
CHECK_EQ(options_.number_of_replicas(), stream_executors.size());
std::vector<std::vector<const ShapedBuffer*>> replicated_arguments;
replicated_arguments.resize(options_.number_of_replicas());
@@ -242,7 +241,7 @@ Service::ResolveAndValidateArguments(
StatusOr<std::unique_ptr<HloModuleConfig>> Service::CreateModuleConfig(
const ProgramShape& program_shape,
- tensorflow::gtl::ArraySlice<const Shape*> argument_shapes,
+ absl::Span<const Shape* const> argument_shapes,
const ExecutionOptions* execution_options) {
auto config = absl::make_unique<HloModuleConfig>(program_shape);
ComputationLayout* computation_layout =
@@ -299,7 +298,7 @@ StatusOr<std::unique_ptr<HloModuleConfig>> Service::CreateModuleConfig(
StatusOr<std::unique_ptr<HloModuleConfig>> Service::CreateModuleConfig(
const ProgramShape& program_shape,
- tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments,
+ absl::Span<const ShapedBuffer* const> arguments,
const ExecutionOptions& execution_options) {
std::vector<const Shape*> argument_shapes;
for (const auto* arg : arguments) {
@@ -367,12 +366,10 @@ StatusOr<std::vector<std::unique_ptr<Executable>>> Service::BuildExecutables(
StatusOr<std::vector<GlobalDataHandle>>
Service::ExecuteParallelAndRegisterResult(
- tensorflow::gtl::ArraySlice<Executable*> executables,
- tensorflow::gtl::ArraySlice<std::vector<std::vector<const ShapedBuffer*>>>
- arguments,
- Backend* backend, tensorflow::gtl::ArraySlice<DeviceHandle> device_handles,
- tensorflow::gtl::ArraySlice<string> result_tags,
- ExecutionProfile* profile) {
+ absl::Span<Executable* const> executables,
+ absl::Span<const std::vector<std::vector<const ShapedBuffer*>>> arguments,
+ Backend* backend, absl::Span<const DeviceHandle> device_handles,
+ absl::Span<const string> result_tags, ExecutionProfile* profile) {
// Streams where the computation are launched, so we can wait on the streams
// to complete.
std::vector<StreamPool::Ptr> streams;
@@ -511,8 +508,7 @@ Service::ExecuteParallelAndRegisterResult(
StatusOr<GlobalDataHandle> Service::ExecuteAndRegisterResult(
Executable* executable,
- const tensorflow::gtl::ArraySlice<std::vector<const ShapedBuffer*>>
- arguments,
+ const absl::Span<const std::vector<const ShapedBuffer*>> arguments,
Backend* backend, const string& result_tag, ExecutionProfile* profile) {
// Set up streams.
std::vector<StreamPool::Ptr> streams;
@@ -555,8 +551,7 @@ StatusOr<GlobalDataHandle> Service::ExecuteAndRegisterResult(
// TODO(b/69985541): Support profiling also on this path.
- std::vector<tensorflow::gtl::ArraySlice<const ShapedBuffer*>>
- replicated_arguments;
+ std::vector<absl::Span<const ShapedBuffer* const>> replicated_arguments;
for (const auto& arg : arguments) {
replicated_arguments.push_back(arg);
}
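In the hunk above, each arg is a std::vector<const ShapedBuffer*>, and push_back converts it implicitly to a span. The span is a non-owning view, so the conversion copies nothing, but the backing vectors must outlive the spans. A self-contained sketch of the same pattern (DummyBuffer and Demo are hypothetical stand-ins):

    #include <vector>
    #include "absl/types/span.h"

    struct DummyBuffer {};  // hypothetical stand-in for ShapedBuffer

    void Demo(absl::Span<const std::vector<const DummyBuffer*>> arguments) {
      // One non-owning view per replica; each span aliases the vector it was
      // built from, so `arguments` must stay alive while the views are used.
      std::vector<absl::Span<const DummyBuffer* const>> replicated;
      for (const auto& arg : arguments) {
        replicated.push_back(arg);  // vector -> span, no element copies
      }
    }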
@@ -595,7 +590,7 @@ StatusOr<std::vector<se::StreamExecutor*>> Service::GetExecutors(
StatusOr<std::vector<std::vector<const ShapedBuffer*>>> Service::GetArguments(
const ExecutionOptions& execution_options,
- tensorflow::gtl::ArraySlice<const GlobalDataHandle*> arguments) {
+ absl::Span<const GlobalDataHandle* const> arguments) {
// Resolve the allocations for the arguments of the computation, and create
// a vector of device memory offsets for the arguments from the allocations.
// In the case of partitioned computations, assume all arguments go on the
diff --git a/tensorflow/compiler/xla/service/service.h b/tensorflow/compiler/xla/service/service.h
index 47d196fb2a..173300d8b6 100644
--- a/tensorflow/compiler/xla/service/service.h
+++ b/tensorflow/compiler/xla/service/service.h
@@ -176,7 +176,7 @@ class Service : public ServiceInterface {
// class.
StatusOr<std::unique_ptr<HloModuleConfig>> CreateModuleConfig(
const ProgramShape& program_shape,
- tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments,
+ absl::Span<const ShapedBuffer* const> arguments,
const ExecutionOptions& execution_options);
// Picks a parallel response and fills the result.
@@ -191,7 +191,7 @@ class Service : public ServiceInterface {
// Prepares the arguments for parallel execution.
StatusOr<std::vector<std::vector<const ShapedBuffer*>>> GetArguments(
const ExecutionOptions& execution_options,
- tensorflow::gtl::ArraySlice<const GlobalDataHandle*> arguments);
+ absl::Span<const GlobalDataHandle* const> arguments);
protected:
friend class LocalExecutable;
@@ -207,14 +207,14 @@ class Service : public ServiceInterface {
// the corresponding replica.
StatusOr<std::vector<std::vector<const ShapedBuffer*>>>
ResolveAndValidateArguments(
- tensorflow::gtl::ArraySlice<const GlobalDataHandle*> arguments,
- tensorflow::gtl::ArraySlice<se::StreamExecutor*> stream_executors);
+ absl::Span<const GlobalDataHandle* const> arguments,
+ absl::Span<se::StreamExecutor* const> stream_executors);
// Creates an HLO module config for the given program shape and arguments.
// execution_options is optional; if not given a default is used.
StatusOr<std::unique_ptr<HloModuleConfig>> CreateModuleConfig(
const ProgramShape& program_shape,
- tensorflow::gtl::ArraySlice<const Shape*> argument_shapes,
+ absl::Span<const Shape* const> argument_shapes,
const ExecutionOptions* execution_options);
// Builds an Executable for the given parameters.
@@ -242,21 +242,17 @@ class Service : public ServiceInterface {
// ExecutionProfile object which will be filled in with profile data.
StatusOr<GlobalDataHandle> ExecuteAndRegisterResult(
Executable* executable,
- const tensorflow::gtl::ArraySlice<std::vector<const ShapedBuffer*>>
- arguments,
+ const absl::Span<const std::vector<const ShapedBuffer*>> arguments,
Backend* backend, const string& result_tag, ExecutionProfile* profile);
// Runs the given executables with the given arguments and registers the result
// from each executable in the allocation tracker. The handles of the result
// from the tracker are returned.
StatusOr<std::vector<GlobalDataHandle>> ExecuteParallelAndRegisterResult(
- tensorflow::gtl::ArraySlice<Executable*> executables,
- tensorflow::gtl::ArraySlice<std::vector<std::vector<const ShapedBuffer*>>>
- arguments,
- Backend* backend,
- tensorflow::gtl::ArraySlice<DeviceHandle> device_handles,
- tensorflow::gtl::ArraySlice<string> result_tags,
- ExecutionProfile* profile);
+ absl::Span<Executable* const> executables,
+ absl::Span<const std::vector<std::vector<const ShapedBuffer*>>> arguments,
+ Backend* backend, absl::Span<const DeviceHandle> device_handles,
+ absl::Span<const string> result_tags, ExecutionProfile* profile);
// Executes a single computation which has more than one target device.
// The N devices are expected to all return an empty tuple, but one, which
diff --git a/tensorflow/compiler/xla/service/shape_inference.cc b/tensorflow/compiler/xla/service/shape_inference.cc
index 45427bba25..2611749862 100644
--- a/tensorflow/compiler/xla/service/shape_inference.cc
+++ b/tensorflow/compiler/xla/service/shape_inference.cc
@@ -45,7 +45,7 @@ using absl::StrFormat;
using absl::StrJoin;
// Returns true if no element is present in slice more than once.
-bool AllUnique(tensorflow::gtl::ArraySlice<int64> slice) {
+bool AllUnique(absl::Span<const int64> slice) {
return std::set<int64>(slice.begin(), slice.end()).size() == slice.size();
}
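AllUnique is a file-local helper, but its new signature shows the most common shape in this file: absl::Span<const int64> accepts a brace list of dimension numbers directly. Illustrative calls (not part of the change):

    AllUnique({0, 1, 2});  // true: every dimension number is distinct
    AllUnique({1, 1, 2});  // false: 1 appears twice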
@@ -57,11 +57,10 @@ Status ExpectArray(const Shape& shape, absl::string_view op_type) {
return Status::OK();
}
-Status VerifyReducerShape(
- const ProgramShape& reducer_shape,
- tensorflow::gtl::ArraySlice<const Shape*> init_value_shapes,
- tensorflow::gtl::ArraySlice<PrimitiveType> input_element_types,
- int64 inputs) {
+Status VerifyReducerShape(const ProgramShape& reducer_shape,
+ absl::Span<const Shape* const> init_value_shapes,
+ absl::Span<const PrimitiveType> input_element_types,
+ int64 inputs) {
if (reducer_shape.parameters_size() != inputs * 2) {
return InvalidArgument(
"Reduction function must take %d parameters, but "
@@ -335,8 +334,7 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
}
/* static */ StatusOr<Shape> ShapeInference::InferConcatOpShape(
- tensorflow::gtl::ArraySlice<const Shape*> arg_shapes,
- const int64 dimension) {
+ absl::Span<const Shape* const> arg_shapes, const int64 dimension) {
if (arg_shapes.empty()) {
return InvalidArgument("Concatenate expects at least one argument.");
}
@@ -394,7 +392,7 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
}
/* static */ StatusOr<Shape> ShapeInference::InferAfterAllShape(
- tensorflow::gtl::ArraySlice<const Shape*> arg_shapes) {
+ absl::Span<const Shape* const> arg_shapes) {
for (const Shape* arg_shape : arg_shapes) {
if (arg_shape->element_type() != TOKEN) {
return InvalidArgument(
@@ -550,22 +548,22 @@ Status ValidateDotDimensionNumbers(
const Shape& lhs, const Shape& rhs,
const DotDimensionNumbers& dimension_numbers) {
// Check that dimension numbers are in range.
- auto dims_in_range =
- [](const int64 rank, tensorflow::gtl::ArraySlice<int64> contracting_dims,
- tensorflow::gtl::ArraySlice<int64> batch_dims) -> bool {
+ auto dims_in_range = [](const int64 rank,
+ absl::Span<const int64> contracting_dims,
+ absl::Span<const int64> batch_dims) -> bool {
auto in_range = [&rank](int64 i) -> bool { return 0 <= i && i < rank; };
return std::all_of(contracting_dims.begin(), contracting_dims.end(),
in_range) &&
std::all_of(batch_dims.begin(), batch_dims.end(), in_range);
};
- tensorflow::gtl::ArraySlice<int64> lhs_contracting_dimensions =
+ absl::Span<const int64> lhs_contracting_dimensions =
AsInt64Slice(dimension_numbers.lhs_contracting_dimensions());
- tensorflow::gtl::ArraySlice<int64> rhs_contracting_dimensions =
+ absl::Span<const int64> rhs_contracting_dimensions =
AsInt64Slice(dimension_numbers.rhs_contracting_dimensions());
- tensorflow::gtl::ArraySlice<int64> lhs_batch_dimensions =
+ absl::Span<const int64> lhs_batch_dimensions =
AsInt64Slice(dimension_numbers.lhs_batch_dimensions());
- tensorflow::gtl::ArraySlice<int64> rhs_batch_dimensions =
+ absl::Span<const int64> rhs_batch_dimensions =
AsInt64Slice(dimension_numbers.rhs_batch_dimensions());
if (!dims_in_range(ShapeUtil::Rank(lhs), lhs_contracting_dimensions,
@@ -577,8 +575,8 @@ Status ValidateDotDimensionNumbers(
}
// Check that dimension numbers are unique.
- auto dims_unique = [](tensorflow::gtl::ArraySlice<int64> contracting_dims,
- tensorflow::gtl::ArraySlice<int64> batch_dims) -> bool {
+ auto dims_unique = [](absl::Span<const int64> contracting_dims,
+ absl::Span<const int64> batch_dims) -> bool {
tensorflow::gtl::FlatSet<int64> dim_set;
auto is_unique = [&dim_set](int64 i) -> bool {
return dim_set.insert(i).second;
@@ -748,7 +746,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
/* static */ StatusOr<Shape> ShapeInference::InferInDimBroadcastShape(
const Shape& smaller_shape, const Shape& larger_shape,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
if (broadcast_dimensions.empty() && !ShapeUtil::IsScalar(smaller_shape)) {
// Reject "magic" inference for binops on different shapes, requiring
// the user to provide an explicit broadcast dimension in this case.
@@ -849,7 +847,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
/* static */ StatusOr<Shape> ShapeInference::InferElementwiseBinaryOpShape(
HloOpcode operation, const Shape& lhs, const Shape& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
TF_RETURN_IF_ERROR(ExpectArray(lhs, "lhs of elementwise binary operation"));
TF_RETURN_IF_ERROR(ExpectArray(rhs, "rhs of elementwise binary operation"));
@@ -906,7 +904,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
/* static */ StatusOr<Shape> ShapeInference::InferBinaryOpShape(
HloOpcode opcode, const Shape& lhs, const Shape& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
+ absl::Span<const int64> broadcast_dimensions) {
VLOG(2) << StrFormat(
"inferring shape for <%s>(%s, %s) with broadcast_dimensions={%s}",
HloOpcodeString(opcode), ShapeUtil::HumanString(lhs),
@@ -1005,8 +1003,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
}
/* static */ StatusOr<Shape> ShapeInference::InferVariadicOpShape(
- HloOpcode opcode,
- tensorflow::gtl::ArraySlice<const HloInstruction*> operands) {
+ HloOpcode opcode, absl::Span<const HloInstruction* const> operands) {
std::vector<const Shape*> operand_shapes;
operand_shapes.reserve(operands.size());
for (const HloInstruction* operand : operands) {
@@ -1016,8 +1013,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
}
/* static */ StatusOr<Shape> ShapeInference::InferVariadicOpShape(
- HloOpcode opcode,
- tensorflow::gtl::ArraySlice<const Shape*> operand_shapes) {
+ HloOpcode opcode, absl::Span<const Shape* const> operand_shapes) {
for (const Shape* shape : operand_shapes) {
TF_DCHECK_OK(ShapeUtil::ValidateShapeWithOptionalLayout(*shape));
}
@@ -1053,9 +1049,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
}
/* static */ StatusOr<Shape> ShapeInference::InferMapShape(
- tensorflow::gtl::ArraySlice<const Shape*> arg_shapes,
- const ProgramShape& to_apply,
- tensorflow::gtl::ArraySlice<int64> dimensions) {
+ absl::Span<const Shape* const> arg_shapes, const ProgramShape& to_apply,
+ absl::Span<const int64> dimensions) {
if (arg_shapes.empty()) {
return InvalidArgument("Map expects at least one argument.");
}
@@ -1711,7 +1706,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
/* static */ StatusOr<Shape> ShapeInference::InferFftShape(
const Shape& in, const FftType fft_type,
- const tensorflow::gtl::ArraySlice<int64> fft_length) {
+ const absl::Span<const int64> fft_length) {
const int64 fft_rank = fft_length.size();
if (fft_rank < 1 || fft_rank > 3) {
return InvalidArgument("FFT only supports ranks 1-3; got %d.", fft_rank);
@@ -1792,7 +1787,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
}
/* static */ StatusOr<Shape> ShapeInference::InferCrossReplicaSumShape(
- tensorflow::gtl::ArraySlice<const Shape*> operand_shapes) {
+ absl::Span<const Shape* const> operand_shapes) {
for (const Shape* operand_shape : operand_shapes) {
TF_RETURN_IF_ERROR(
ExpectArray(*operand_shape, "operand of cross replica sum"));
@@ -1835,7 +1830,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
}
/* static */ StatusOr<Shape> ShapeInference::InferAllToAllTupleShape(
- tensorflow::gtl::ArraySlice<const Shape*> operand_shapes) {
+ absl::Span<const Shape* const> operand_shapes) {
// An Alltoall HLO instruction receives N operands (with the same shape) and
// returns a tuple that contains N array shapes.
TF_RET_CHECK(!operand_shapes.empty());
@@ -1859,8 +1854,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
}
/* static */ StatusOr<Shape> ShapeInference::InferReduceShape(
- tensorflow::gtl::ArraySlice<const Shape*> arg_shapes,
- tensorflow::gtl::ArraySlice<int64> dimensions_to_reduce,
+ absl::Span<const Shape* const> arg_shapes,
+ absl::Span<const int64> dimensions_to_reduce,
const ProgramShape& to_apply) {
if (arg_shapes.empty()) {
return InvalidArgument("Reduce must have at least 2 arguments, has 0");
@@ -1998,9 +1993,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
}
/* static */ StatusOr<Shape> ShapeInference::InferSliceShape(
- const Shape& arg, tensorflow::gtl::ArraySlice<int64> starts,
- tensorflow::gtl::ArraySlice<int64> limits,
- tensorflow::gtl::ArraySlice<int64> strides) {
+ const Shape& arg, absl::Span<const int64> starts,
+ absl::Span<const int64> limits, absl::Span<const int64> strides) {
auto error = [&](const string& message) {
return InvalidArgument(
"%s in slice operation; argument shape: %s; starts: {%s}; limits: "
@@ -2062,7 +2056,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
/* static */ StatusOr<Shape> ShapeInference::InferDynamicSliceShape(
const Shape& operand_shape, const Shape& start_indices_shape,
- tensorflow::gtl::ArraySlice<int64> slice_sizes) {
+ absl::Span<const int64> slice_sizes) {
TF_RETURN_IF_ERROR(ExpectArray(operand_shape, "operand of dynamic slice"));
TF_RETURN_IF_ERROR(
ExpectArray(start_indices_shape, "start indices of dynamic slice"));
@@ -2189,7 +2183,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
}
/*static */ StatusOr<Shape> ShapeInference::InferReverseShape(
- const Shape& operand_shape, tensorflow::gtl::ArraySlice<int64> dimensions) {
+ const Shape& operand_shape, absl::Span<const int64> dimensions) {
TF_RETURN_IF_ERROR(ExpectArray(operand_shape, "operand of reverse"));
if (!AllUnique(dimensions)) {
return InvalidArgument("a dimension number is duplicated in reverse");
@@ -2315,7 +2309,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
}
/* static */ StatusOr<Shape> ShapeInference::InferBroadcastShape(
- const Shape& operand, tensorflow::gtl::ArraySlice<int64> broadcast_sizes) {
+ const Shape& operand, absl::Span<const int64> broadcast_sizes) {
TF_RETURN_IF_ERROR(ExpectArray(operand, "operand of broadcast"));
for (int64 size : broadcast_sizes) {
if (size < 0) {
@@ -2333,8 +2327,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
}
/* static */ StatusOr<Shape> ShapeInference::InferReshapeShape(
- const Shape& operand, tensorflow::gtl::ArraySlice<int64> dimensions,
- tensorflow::gtl::ArraySlice<int64> new_sizes) {
+ const Shape& operand, absl::Span<const int64> dimensions,
+ absl::Span<const int64> new_sizes) {
TF_RETURN_IF_ERROR(ExpectArray(operand, "reshape"));
Shape inferred_shape =
@@ -2366,7 +2360,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
}
/* static */ StatusOr<Shape> ShapeInference::InferTransposeShape(
- const Shape& operand, tensorflow::gtl::ArraySlice<int64> dimensions) {
+ const Shape& operand, absl::Span<const int64> dimensions) {
TF_RETURN_IF_ERROR(ExpectArray(operand, "transpose"));
std::vector<int64> indices(ShapeUtil::Rank(operand));
@@ -2471,8 +2465,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
}
/* static */ StatusOr<Shape> ShapeInference::InferCallShape(
- tensorflow::gtl::ArraySlice<const Shape*> arg_shapes,
- const ProgramShape& to_apply) {
+ absl::Span<const Shape* const> arg_shapes, const ProgramShape& to_apply) {
// The applied function's arity equals the number of arguments.
if (arg_shapes.size() != to_apply.parameters_size()) {
string computation_signature = ShapeUtil::HumanString(to_apply);
@@ -2505,8 +2498,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
}
static Status ValidateGatherDimensionNumbers(
- const Shape& input_shape,
- tensorflow::gtl::ArraySlice<int64> start_indices_shape,
+ const Shape& input_shape, absl::Span<const int64> start_indices_shape,
const GatherDimensionNumbers& dim_numbers) {
if (!absl::c_is_sorted(dim_numbers.offset_dims())) {
return InvalidArgument(
@@ -2599,7 +2591,7 @@ static Status ValidateGatherDimensionNumbers(
/*static*/ StatusOr<Shape> ShapeInference::InferGatherShape(
const Shape& input_shape, const Shape& start_indices_shape,
const GatherDimensionNumbers& gather_dim_numbers,
- tensorflow::gtl::ArraySlice<int64> slice_sizes) {
+ absl::Span<const int64> slice_sizes) {
TF_RETURN_IF_ERROR(
ExpectArray(input_shape, "input tensor operand gather op"));
TF_RETURN_IF_ERROR(
@@ -2709,8 +2701,7 @@ static Status ValidateGatherDimensionNumbers(
namespace {
Status ValidateScatterDimensionNumbers(
- const Shape& operand_shape,
- tensorflow::gtl::ArraySlice<int64> scatter_indices_shape,
+ const Shape& operand_shape, absl::Span<const int64> scatter_indices_shape,
const Shape& updates_shape, const ScatterDimensionNumbers& dim_numbers) {
// Validate update_window_dims in ScatterDimensionNumbers.
if (!absl::c_is_sorted(dim_numbers.update_window_dims())) {
diff --git a/tensorflow/compiler/xla/service/shape_inference.h b/tensorflow/compiler/xla/service/shape_inference.h
index 235b1a4cf3..072ada2d8f 100644
--- a/tensorflow/compiler/xla/service/shape_inference.h
+++ b/tensorflow/compiler/xla/service/shape_inference.h
@@ -55,7 +55,7 @@ class ShapeInference {
// given input shapes.
static StatusOr<Shape> InferBinaryOpShape(
HloOpcode opcode, const Shape& lhs, const Shape& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ absl::Span<const int64> broadcast_dimensions);
static StatusOr<Shape> InferBinaryOpShape(HloOpcode opcode,
const HloInstruction* lhs,
const HloInstruction* rhs);
@@ -73,18 +73,15 @@ class ShapeInference {
// Infers the shape produced by applying the given variadic operation to the
// given input operand shapes.
static StatusOr<Shape> InferVariadicOpShape(
- HloOpcode opcode,
- tensorflow::gtl::ArraySlice<const Shape*> operand_shapes);
+ HloOpcode opcode, absl::Span<const Shape* const> operand_shapes);
static StatusOr<Shape> InferVariadicOpShape(
- HloOpcode opcode,
- tensorflow::gtl::ArraySlice<const HloInstruction*> operands);
+ HloOpcode opcode, absl::Span<const HloInstruction* const> operands);
// Infers the shape produced by applying the given mapping computation shape
// to the given operand shapes.
static StatusOr<Shape> InferMapShape(
- tensorflow::gtl::ArraySlice<const Shape*> arg_shapes,
- const ProgramShape& to_apply,
- tensorflow::gtl::ArraySlice<int64> dimensions);
+ absl::Span<const Shape* const> arg_shapes, const ProgramShape& to_apply,
+ absl::Span<const int64> dimensions);
// Infers the shape produced by InferBatchNormTraining with the given
// operands.
@@ -116,14 +113,13 @@ class ShapeInference {
int64 feature_group_count = 1);
// Infers the shape produced by the given FFT type on the given operand.
- static StatusOr<Shape> InferFftShape(
- const Shape& in, FftType fft_type,
- tensorflow::gtl::ArraySlice<int64> fft_length);
+ static StatusOr<Shape> InferFftShape(const Shape& in, FftType fft_type,
+ absl::Span<const int64> fft_length);
// Infers the shape produced by a cross replica sum with the given operand
// shapes.
static StatusOr<Shape> InferCrossReplicaSumShape(
- tensorflow::gtl::ArraySlice<const Shape*> operand_shapes);
+ absl::Span<const Shape* const> operand_shapes);
// Infers the final shape of an Alltoall operation that is created by the xla
// builder.
@@ -134,7 +130,7 @@ class ShapeInference {
// Infers the shape of an HLO all-to-all instruction.
static StatusOr<Shape> InferAllToAllTupleShape(
- tensorflow::gtl::ArraySlice<const Shape*> operand_shapes);
+ absl::Span<const Shape* const> operand_shapes);
// Infers the shape of a collective permute operation.
static StatusOr<Shape> InferCollectivePermuteShape(const Shape& shape);
@@ -146,8 +142,8 @@ class ShapeInference {
// index as the leading parameter, and the program shape should match
// accordingly (or an error will result).
static StatusOr<Shape> InferReduceShape(
- tensorflow::gtl::ArraySlice<const Shape*> arg_shapes,
- tensorflow::gtl::ArraySlice<int64> dimensions_to_reduce,
+ absl::Span<const Shape* const> arg_shapes,
+ absl::Span<const int64> dimensions_to_reduce,
const ProgramShape& to_apply);
// Infers the shape produced by applying the given computation to the operand
@@ -165,24 +161,23 @@ class ShapeInference {
// Infers the shape produced by a reverse operation that reverses the order
// of the elements in the given dimensions.
- static StatusOr<Shape> InferReverseShape(
- const Shape& operand_shape,
- tensorflow::gtl::ArraySlice<int64> dimensions);
+ static StatusOr<Shape> InferReverseShape(const Shape& operand_shape,
+ absl::Span<const int64> dimensions);
// Infers the shape produced by a slice operation spanning from the starts to
// the limits in the original shape's dimensions.
//
// e.g. slice f32[32x32] 0:16 0:16 -> f32[16x16]
- static StatusOr<Shape> InferSliceShape(
- const Shape& arg, tensorflow::gtl::ArraySlice<int64> starts,
- tensorflow::gtl::ArraySlice<int64> limits,
- tensorflow::gtl::ArraySlice<int64> strides);
+ static StatusOr<Shape> InferSliceShape(const Shape& arg,
+ absl::Span<const int64> starts,
+ absl::Span<const int64> limits,
+ absl::Span<const int64> strides);
// Infers the shape produced by a dynamic slice operation of size specified
// in 'slice_sizes', with dynamic start indices shape 'start_indices_shape'.
static StatusOr<Shape> InferDynamicSliceShape(
const Shape& operand_shape, const Shape& start_indices_shape,
- tensorflow::gtl::ArraySlice<int64> slice_sizes);
+ absl::Span<const int64> slice_sizes);
// Infers the shape produced by a dynamic update slice operation based
// on the shape of operand and update.
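As a hedged sketch of the slice example in the InferSliceShape comment above (assuming the xla namespace, ShapeUtil, and the F32 primitive type; the strides of 1 are an assumption and error handling is elided):

    Shape arg = ShapeUtil::MakeShape(F32, {32, 32});
    StatusOr<Shape> inferred = ShapeInference::InferSliceShape(
        arg, /*starts=*/{0, 0}, /*limits=*/{16, 16}, /*strides=*/{1, 1});
    // On success, inferred.ValueOrDie() is f32[16,16].

The brace lists bind directly to the absl::Span<const int64> parameters, so no temporaries need to be spelled out at the call site.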
@@ -213,30 +208,30 @@ class ShapeInference {
// Infers the shape produced by a broadcast operation.
static StatusOr<Shape> InferBroadcastShape(
- const Shape& operand, tensorflow::gtl::ArraySlice<int64> broadcast_sizes);
+ const Shape& operand, absl::Span<const int64> broadcast_sizes);
// Infers the shape produced by a reshape operation from the element type of
// its operand and the new dimension sizes specified.
- static StatusOr<Shape> InferReshapeShape(
- const Shape& operand, tensorflow::gtl::ArraySlice<int64> dimensions,
- tensorflow::gtl::ArraySlice<int64> new_sizes);
+ static StatusOr<Shape> InferReshapeShape(const Shape& operand,
+ absl::Span<const int64> dimensions,
+ absl::Span<const int64> new_sizes);
// Infers the shape produced by a transpose operation from the element type of
// its operand and its dimensions field.
static StatusOr<Shape> InferTransposeShape(
- const Shape& operand, tensorflow::gtl::ArraySlice<int64> dimensions);
+ const Shape& operand, absl::Span<const int64> dimensions);
// Helper that infers the shape produced by performing a concatenate operation
// with the given operand shapes.
static StatusOr<Shape> InferConcatOpShape(
- tensorflow::gtl::ArraySlice<const Shape*> arg_shapes, int64 dimension);
+ absl::Span<const Shape* const> arg_shapes, int64 dimension);
// Infers the shape produced by a kAfterAll. Trivially this shape is always a
// TOKEN shape. However, ShapeInference serves two purposes: inferring shapes
// and checking operand shapes. This method verifies that the operand shapes
// are all TOKENs.
static StatusOr<Shape> InferAfterAllShape(
- tensorflow::gtl::ArraySlice<const Shape*> arg_shapes);
+ absl::Span<const Shape* const> arg_shapes);
// Helper that validates the given operand shape can be converted to the
// target output_shape via a convert instruction -- the requirement is that
@@ -266,8 +261,7 @@ class ShapeInference {
// Helper that validates the given arg_shapes are compatible with the shape of
// the to_apply parameters, and returns the to_apply result shape.
static StatusOr<Shape> InferCallShape(
- tensorflow::gtl::ArraySlice<const Shape*> arg_shapes,
- const ProgramShape& to_apply);
+ absl::Span<const Shape* const> arg_shapes, const ProgramShape& to_apply);
// Helper that infers the shape produced by performing a dot operation with
// the given LHS and RHS shapes.
@@ -281,7 +275,7 @@ class ShapeInference {
static StatusOr<Shape> InferGatherShape(
const Shape& input_shape, const Shape& start_indices_shape,
const GatherDimensionNumbers& gather_dim_numbers,
- tensorflow::gtl::ArraySlice<int64> slice_sizes);
+ absl::Span<const int64> slice_sizes);
// Helper that validates the given input shape, scatter indices shape, updates
// shape, and scatter dimension numbers that constitute a scatter operation,
@@ -299,7 +293,7 @@ class ShapeInference {
// even in the presence of broadcasting of one of the operands over the other.
static StatusOr<Shape> InferElementwiseBinaryOpShape(
HloOpcode operation, const Shape& lhs, const Shape& rhs,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ absl::Span<const int64> broadcast_dimensions);
// Helper for inferring the shape of Clamp ops.
static StatusOr<Shape> InferClampShape(const Shape& min, const Shape& operand,
@@ -327,7 +321,7 @@ class ShapeInference {
// smaller_shape is broadcast to.
static StatusOr<Shape> InferInDimBroadcastShape(
const Shape& smaller_shape, const Shape& larger_shape,
- tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
+ absl::Span<const int64> broadcast_dimensions);
TF_DISALLOW_COPY_AND_ASSIGN(ShapeInference);
};
diff --git a/tensorflow/compiler/xla/service/shape_inference_test.cc b/tensorflow/compiler/xla/service/shape_inference_test.cc
index 4ed8fc6b86..5dbe5a1611 100644
--- a/tensorflow/compiler/xla/service/shape_inference_test.cc
+++ b/tensorflow/compiler/xla/service/shape_inference_test.cc
@@ -28,7 +28,6 @@ limitations under the License.
namespace xla {
namespace {
-using ::tensorflow::gtl::ArraySlice;
using ::testing::ContainsRegex;
using ::testing::HasSubstr;
@@ -58,9 +57,9 @@ class ReduceShapeInferenceTest : public ShapeInferenceTest {
// Helper that runs reduce shape inference with the input 'arg' and given
// dimensions to reduce, and checks the inferred shape is as expected. The
// element type here is hard-coded to F32.
- void ExpectInferredReduceShape(
- const Shape& expected_inferred_shape, const Shape& arg,
- tensorflow::gtl::ArraySlice<int64> dimensions_to_reduce) {
+ void ExpectInferredReduceShape(const Shape& expected_inferred_shape,
+ const Shape& arg,
+ absl::Span<const int64> dimensions_to_reduce) {
ProgramShape to_apply = ShapeUtil::MakeProgramShape({f32_, f32_}, f32_);
auto inferred_status = ShapeInference::InferReduceShape(
{&arg, &f32_}, dimensions_to_reduce, to_apply);
@@ -252,7 +251,7 @@ TEST_F(ShapeInferenceTest, ClampBadShapes) {
TEST_F(ShapeInferenceTest, Complex) {
auto complex_shape = [&](const Shape& lhs, const Shape& rhs,
- const tensorflow::gtl::ArraySlice<int64>& bcast) {
+ const absl::Span<const int64>& bcast) {
return ShapeInference::InferBinaryOpShape(HloOpcode::kComplex, lhs, rhs,
bcast);
};
diff --git a/tensorflow/compiler/xla/service/transfer_manager.h b/tensorflow/compiler/xla/service/transfer_manager.h
index f77690a462..0c393c53a1 100644
--- a/tensorflow/compiler/xla/service/transfer_manager.h
+++ b/tensorflow/compiler/xla/service/transfer_manager.h
@@ -130,7 +130,7 @@ class TransferManager {
// Resets the devices associated with this transfer manager.
virtual Status ResetDevices(
- tensorflow::gtl::ArraySlice<se::StreamExecutor*> executor) = 0;
+ absl::Span<se::StreamExecutor* const> executor) = 0;
// Given an allocated ShapedBuffer, constructs the tuple index table(s) in
// each buffer of the given ShapedBuffer corresponding to tuple shapes. If the
@@ -211,8 +211,7 @@ class TransferManager {
// to construct a tuple index table in the platform-specific tuple
// representation.
virtual Status WriteSingleTupleIndexTable(
- se::Stream* stream,
- tensorflow::gtl::ArraySlice<se::DeviceMemoryBase> elements,
+ se::Stream* stream, absl::Span<const se::DeviceMemoryBase> elements,
const Shape& shape, se::DeviceMemoryBase* region) = 0;
private:
diff --git a/tensorflow/compiler/xla/service/tuple_points_to_analysis.cc b/tensorflow/compiler/xla/service/tuple_points_to_analysis.cc
index cf00ca102b..6fed7c76d0 100644
--- a/tensorflow/compiler/xla/service/tuple_points_to_analysis.cc
+++ b/tensorflow/compiler/xla/service/tuple_points_to_analysis.cc
@@ -360,7 +360,7 @@ Status TuplePointsToAnalysis::HandleSend(HloInstruction* send) {
}
Status TuplePointsToAnalysis::HandleTuple(HloInstruction* tuple) {
- tensorflow::gtl::ArraySlice<HloInstruction*> operands(tuple->operands());
+ absl::Span<HloInstruction* const> operands(tuple->operands());
PointsToSet& points_to_set = CreateEmptyPointsToSet(tuple);
points_to_set.AddPointedToBuffer(
logical_buffer_analysis_->GetBuffer(tuple, /*index=*/{}),
diff --git a/tensorflow/compiler/xla/service/tuple_points_to_analysis_test.cc b/tensorflow/compiler/xla/service/tuple_points_to_analysis_test.cc
index 10d382e8ab..a32d1f9026 100644
--- a/tensorflow/compiler/xla/service/tuple_points_to_analysis_test.cc
+++ b/tensorflow/compiler/xla/service/tuple_points_to_analysis_test.cc
@@ -72,9 +72,8 @@ class TuplePointsToAnalysisTest : public HloTestBase {
// Checks that the given points-to set contains exactly (unordered) the given
// LogicalBuffers.
- void ExpectHasBuffers(
- const PointsToSet::BufferList& points_to_set,
- tensorflow::gtl::ArraySlice<const LogicalBuffer*> buffers) {
+ void ExpectHasBuffers(const PointsToSet::BufferList& points_to_set,
+ absl::Span<const LogicalBuffer* const> buffers) {
std::vector<const LogicalBuffer*> vec(buffers.begin(), buffers.end());
EXPECT_THAT(points_to_set, UnorderedElementsAreArray(vec));
}
@@ -83,7 +82,7 @@ class TuplePointsToAnalysisTest : public HloTestBase {
// top-level buffers of the given instructions.
void ExpectHasTopLevelBuffers(
const PointsToSet::BufferList& points_to_set,
- tensorflow::gtl::ArraySlice<HloInstruction*> instructions) {
+ absl::Span<HloInstruction* const> instructions) {
PointsToSet::BufferList buffers;
for (auto instruction : instructions) {
buffers.push_back(GetBuffer(instruction, /*index=*/{}));
@@ -94,7 +93,7 @@ class TuplePointsToAnalysisTest : public HloTestBase {
// Overload which takes a set instead of a vector.
void ExpectHasTopLevelBuffers(
const PointsToSet::BufferSet& points_to_set,
- tensorflow::gtl::ArraySlice<HloInstruction*> instructions) {
+ absl::Span<HloInstruction* const> instructions) {
ExpectHasTopLevelBuffers(
PointsToSet::BufferList(points_to_set.begin(), points_to_set.end()),
instructions);
@@ -104,8 +103,7 @@ class TuplePointsToAnalysisTest : public HloTestBase {
// aliases which are exactly (unordered) the given instruction/index pairs.
void ExpectHasBufferAliases(
const HloInstruction* instruction, const ShapeIndex& index,
- tensorflow::gtl::ArraySlice<std::pair<HloInstruction*, ShapeIndex>>
- expected) {
+ absl::Span<const std::pair<HloInstruction*, ShapeIndex>> expected) {
const LogicalBuffer* buffer =
points_to_analysis_->GetBufferDefinedAt(instruction, index)
.ValueOrDie();
diff --git a/tensorflow/compiler/xla/service/tuple_util.cc b/tensorflow/compiler/xla/service/tuple_util.cc
index 4a530bb0b2..9ba01ef7a6 100644
--- a/tensorflow/compiler/xla/service/tuple_util.cc
+++ b/tensorflow/compiler/xla/service/tuple_util.cc
@@ -40,7 +40,7 @@ namespace xla {
/*static*/ HloInstruction* TupleUtil::AppendSuffix(
HloInstruction* input_tuple,
- tensorflow::gtl::ArraySlice<HloInstruction*> trailing_values) {
+ absl::Span<HloInstruction* const> trailing_values) {
CHECK(ShapeUtil::IsTuple(input_tuple->shape()));
HloComputation* computation = input_tuple->parent();
diff --git a/tensorflow/compiler/xla/service/tuple_util.h b/tensorflow/compiler/xla/service/tuple_util.h
index e5ff9aaa83..bc5aac09f2 100644
--- a/tensorflow/compiler/xla/service/tuple_util.h
+++ b/tensorflow/compiler/xla/service/tuple_util.h
@@ -38,7 +38,7 @@ class TupleUtil {
// `input_tuple`.
static HloInstruction* AppendSuffix(
HloInstruction* input_tuple,
- tensorflow::gtl::ArraySlice<HloInstruction*> trailing_values);
+ absl::Span<HloInstruction* const> trailing_values);
};
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/while_loop_analysis.cc b/tensorflow/compiler/xla/service/while_loop_analysis.cc
index 7e4ac92a7c..c3c2603c7e 100644
--- a/tensorflow/compiler/xla/service/while_loop_analysis.cc
+++ b/tensorflow/compiler/xla/service/while_loop_analysis.cc
@@ -211,8 +211,7 @@ optional<int64> ComputeWhileLoopTripCount(HloInstruction* while_op,
VLOG(2) << "Couldn't evaluate while cond: " << result.status();
return nullopt;
}
- if (result.ValueOrDie()->data<bool>() ==
- tensorflow::gtl::ArraySlice<bool>{false}) {
+ if (result.ValueOrDie()->data<bool>() == absl::Span<const bool>{false}) {
VLOG(2) << "Loop has static trip count of " << trip_count;
return trip_count;
}
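The comparison above works because absl::Span defines element-wise relational operators: two spans (or a span and anything convertible to one) compare equal when they have the same length and equal elements, regardless of which buffers they point into. A minimal self-contained sketch (IsSingleFalse is hypothetical):

    #include "absl/types/span.h"

    // absl::Span's operator== compares lengths and elements, not addresses.
    bool IsSingleFalse(absl::Span<const bool> data) {
      return data == absl::Span<const bool>{false};
    }

    void Demo() {
      const bool buf[] = {false};
      IsSingleFalse(buf);            // true
      IsSingleFalse({true, false});  // false: different length and contents
    }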
diff --git a/tensorflow/compiler/xla/service/while_util.cc b/tensorflow/compiler/xla/service/while_util.cc
index e8f76ff745..f90ac91f9d 100644
--- a/tensorflow/compiler/xla/service/while_util.cc
+++ b/tensorflow/compiler/xla/service/while_util.cc
@@ -94,7 +94,7 @@ WidenWhileBody(HloComputation* narrow_body, const Shape& wide_shape) {
/*static*/ StatusOr<WhileUtil::MakeInstructionsLiveInResult>
WhileUtil::MakeInstructionsLiveIn(
HloInstruction* while_instr,
- tensorflow::gtl::ArraySlice<HloInstruction*> instructions) {
+ absl::Span<HloInstruction* const> instructions) {
CHECK(ShapeUtil::IsTuple(while_instr->shape()));
int64 elements_in_old_while_shape = while_instr->shape().tuple_shapes_size();
diff --git a/tensorflow/compiler/xla/service/while_util.h b/tensorflow/compiler/xla/service/while_util.h
index e67636d80f..b1c4486887 100644
--- a/tensorflow/compiler/xla/service/while_util.h
+++ b/tensorflow/compiler/xla/service/while_util.h
@@ -55,7 +55,7 @@ class WhileUtil {
// that contains `while_instr`.
static StatusOr<MakeInstructionsLiveInResult> MakeInstructionsLiveIn(
HloInstruction* while_instr,
- tensorflow::gtl::ArraySlice<HloInstruction*> instructions);
+ absl::Span<HloInstruction* const> instructions);
using LoopStateTy = std::vector<HloInstruction*>;
using LoopBodyGeneratorTy = std::function<StatusOr<LoopStateTy>(
diff --git a/tensorflow/compiler/xla/shape_util.cc b/tensorflow/compiler/xla/shape_util.cc
index 6d016abfde..9772c06bce 100644
--- a/tensorflow/compiler/xla/shape_util.cc
+++ b/tensorflow/compiler/xla/shape_util.cc
@@ -139,8 +139,8 @@ bool CompareShapes(const Shape& lhs, const Shape& rhs, bool compare_layouts,
// Constructs and returns the new shape with the given minor_to_major order in
// its Layout.
StatusOr<Shape> MakeShapeWithLayoutInternal(
- PrimitiveType element_type, tensorflow::gtl::ArraySlice<int64> dimensions,
- tensorflow::gtl::ArraySlice<int64> minor_to_major) {
+ PrimitiveType element_type, absl::Span<const int64> dimensions,
+ absl::Span<const int64> minor_to_major) {
if (dimensions.size() != minor_to_major.size()) {
return InvalidArgument("Dimensions size is %ld, but layout size is %ld.",
dimensions.size(), minor_to_major.size());
@@ -214,8 +214,8 @@ StatusOr<Shape> MakeShapeWithLayoutInternal(
return program_shape;
}
-/* static */ Shape ShapeUtil::MakeShape(
- PrimitiveType element_type, tensorflow::gtl::ArraySlice<int64> dimensions) {
+/* static */ Shape ShapeUtil::MakeShape(PrimitiveType element_type,
+ absl::Span<const int64> dimensions) {
CHECK(IsArrayPrimitiveType(element_type));
Shape result;
PopulateShape(element_type, dimensions, &result);
@@ -223,21 +223,21 @@ StatusOr<Shape> MakeShapeWithLayoutInternal(
}
/* static */ Shape ShapeUtil::MakeShapeWithLayout(
- PrimitiveType element_type, tensorflow::gtl::ArraySlice<int64> dimensions,
- tensorflow::gtl::ArraySlice<int64> minor_to_major) {
+ PrimitiveType element_type, absl::Span<const int64> dimensions,
+ absl::Span<const int64> minor_to_major) {
return MakeShapeWithLayoutInternal(element_type, dimensions, minor_to_major)
.ValueOrDie();
}
/* static */ Shape ShapeUtil::MakeShapeWithDescendingLayout(
- PrimitiveType element_type, tensorflow::gtl::ArraySlice<int64> dimensions) {
+ PrimitiveType element_type, absl::Span<const int64> dimensions) {
std::vector<int64> layout(dimensions.size());
std::iota(layout.rbegin(), layout.rend(), static_cast<int64>(0));
return MakeShapeWithLayout(element_type, dimensions, layout);
}
/* static */ Shape ShapeUtil::MakeShapeWithSparseLayout(
- PrimitiveType element_type, tensorflow::gtl::ArraySlice<int64> dimensions,
+ PrimitiveType element_type, absl::Span<const int64> dimensions,
int64 max_sparse_elements) {
CHECK(IsArrayPrimitiveType(element_type));
Shape shape = ShapeUtil::MakeShape(element_type, dimensions);
@@ -256,9 +256,9 @@ ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout(
return MakeShapeWithDescendingLayout(shape.element_type(), dims);
}
-/* static */ void ShapeUtil::PopulateShape(
- PrimitiveType element_type, tensorflow::gtl::ArraySlice<int64> dimensions,
- Shape* shape) {
+/* static */ void ShapeUtil::PopulateShape(PrimitiveType element_type,
+ absl::Span<const int64> dimensions,
+ Shape* shape) {
shape->Clear();
shape->set_element_type(element_type);
for (int64 dimension : dimensions) {
@@ -268,8 +268,7 @@ ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout(
TF_DCHECK_OK(ValidateShape(*shape));
}
-/* static */ Shape ShapeUtil::MakeTupleShape(
- tensorflow::gtl::ArraySlice<Shape> shapes) {
+/* static */ Shape ShapeUtil::MakeTupleShape(absl::Span<const Shape> shapes) {
Shape result;
result.set_element_type(TUPLE);
result.mutable_tuple_shapes()->Reserve(shapes.size());
@@ -791,7 +790,7 @@ StatusOr<Shape> ParseShapeStringInternal(absl::string_view* s) {
allocated_element_count = LayoutUtil::MaxSparseElements(shape.layout());
} else {
CHECK(LayoutUtil::IsDenseArray(shape)) << shape.ShortDebugString();
- tensorflow::gtl::ArraySlice<int64> padded_dimensions =
+ absl::Span<const int64> padded_dimensions =
LayoutUtil::PaddedDimensions(shape);
if (!padded_dimensions.empty()) {
CHECK_EQ(Rank(shape), padded_dimensions.size());
@@ -1115,7 +1114,7 @@ Status ForEachMutableSubshapeHelper(
}
/* static */ Shape ShapeUtil::PermuteDimensions(
- tensorflow::gtl::ArraySlice<int64> permutation, const Shape& shape) {
+ absl::Span<const int64> permutation, const Shape& shape) {
Shape new_shape = shape;
new_shape.clear_dimensions();
for (auto dim : Permute(permutation, shape.dimensions())) {
@@ -1259,7 +1258,7 @@ ShapeUtil::DimensionsUnmodifiedByReshape(const Shape& input_shape,
/* static */ bool ShapeUtil::TransposeIsBitcast(
const Shape& input_shape, const Shape& output_shape,
- tensorflow::gtl::ArraySlice<int64> dimension_mapping) {
+ absl::Span<const int64> dimension_mapping) {
CHECK(LayoutUtil::HasLayout(input_shape) &&
LayoutUtil::HasLayout(output_shape));
diff --git a/tensorflow/compiler/xla/shape_util.h b/tensorflow/compiler/xla/shape_util.h
index 83e58545bf..8e2b2cb331 100644
--- a/tensorflow/compiler/xla/shape_util.h
+++ b/tensorflow/compiler/xla/shape_util.h
@@ -147,7 +147,7 @@ class ShapeIndexView {
string ToString() const;
private:
- tensorflow::gtl::ArraySlice<int64> indices_;
+ absl::Span<const int64> indices_;
};
std::ostream& operator<<(std::ostream& out, const ShapeIndex& shape_index);
@@ -328,7 +328,7 @@ class ShapeUtil {
static Shape ChangeElementType(const Shape& original, PrimitiveType type);
// Creates a tuple shape from a slice of element shapes within the tuple.
- static Shape MakeTupleShape(tensorflow::gtl::ArraySlice<Shape> shapes);
+ static Shape MakeTupleShape(absl::Span<const Shape> shapes);
// Creates an opaque shape. These are generally used for threading a context
// into a custom operation.
@@ -355,31 +355,29 @@ class ShapeUtil {
// Constructs a new shape with the given element type and sequence of
// dimensions.
static Shape MakeShape(PrimitiveType element_type,
- tensorflow::gtl::ArraySlice<int64> dimensions);
+ absl::Span<const int64> dimensions);
// Creates a Shape with element type corresponding to T and the given
// dimensions
template <typename T>
- static Shape MakeShapeWithType(
- tensorflow::gtl::ArraySlice<int64> dimensions) {
+ static Shape MakeShapeWithType(absl::Span<const int64> dimensions) {
return ShapeUtil::MakeShape(primitive_util::NativeToPrimitiveType<T>(),
dimensions);
}
// Constructs a new shape with the given minor_to_major order in its Layout.
// Returns a value shape such that shape.has_layout().
- static Shape MakeShapeWithLayout(
- PrimitiveType element_type, tensorflow::gtl::ArraySlice<int64> dimensions,
- tensorflow::gtl::ArraySlice<int64> minor_to_major);
+ static Shape MakeShapeWithLayout(PrimitiveType element_type,
+ absl::Span<const int64> dimensions,
+ absl::Span<const int64> minor_to_major);
- static Shape MakeShapeWithSparseLayout(
- PrimitiveType element_type, tensorflow::gtl::ArraySlice<int64> dimensions,
- int64 max_sparse_elements);
+ static Shape MakeShapeWithSparseLayout(PrimitiveType element_type,
+ absl::Span<const int64> dimensions,
+ int64 max_sparse_elements);
// Constructs a new shape with major-first layout (i.e. {n, n-1, ..., 0}).
static Shape MakeShapeWithDescendingLayout(
- PrimitiveType element_type,
- tensorflow::gtl::ArraySlice<int64> dimensions);
+ PrimitiveType element_type, absl::Span<const int64> dimensions);
// Returns a new Shape based on the given Shape with low-dimension-major
// layout (i.e. {n, n-1, ..., 0}, like Fortran), and with the dimensions
@@ -391,8 +389,7 @@ class ShapeUtil {
// As MakeShape, but the object to write to is passed in.
static void PopulateShape(PrimitiveType element_type,
- tensorflow::gtl::ArraySlice<int64> dimensions,
- Shape* shape);
+ absl::Span<const int64> dimensions, Shape* shape);
// Validates that the provided shape satisfies invariants.
static Status ValidateShape(const Shape& shape);
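With the Span-taking declarations above, dimension and layout lists can still be written inline at call sites. A hedged sketch using these entry points (assumes the xla namespace and the F32 primitive type):

    // f32[2,3] with the default descending layout.
    Shape s = ShapeUtil::MakeShape(F32, {2, 3});

    // Same dimensions with an explicit minor_to_major order {0, 1},
    // i.e. dimension 0 is the most minor (column-major for rank 2).
    Shape laid_out = ShapeUtil::MakeShapeWithLayout(F32, {2, 3}, {0, 1});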
@@ -539,7 +536,7 @@ class ShapeUtil {
// !HasLayout(shape) ||
// TransposeIsBitcast(shape, PermuteDimensions(permutation, shape),
// InversePermutation(permutation)).
- static Shape PermuteDimensions(tensorflow::gtl::ArraySlice<int64> permutation,
+ static Shape PermuteDimensions(absl::Span<const int64> permutation,
const Shape& shape);
// If we can go from `shape_pre` to `shape_post` by merely inserting or
@@ -580,9 +577,9 @@ class ShapeUtil {
// to its input and thus may be replaced with a bitcast.
//
// Precondition: Both input_shape and output_shape have explicit layouts.
- static bool TransposeIsBitcast(
- const Shape& input_shape, const Shape& output_shape,
- tensorflow::gtl::ArraySlice<int64> dimension_mapping);
+ static bool TransposeIsBitcast(const Shape& input_shape,
+ const Shape& output_shape,
+ absl::Span<const int64> dimension_mapping);
// Returns whether a reshape from "input_shape" to "output_shape" is a
// bitcast.
@@ -621,12 +618,12 @@ class ShapeUtil {
// continue, or false otherwise.
//
// visitor_function must be a callable of type
- // StatusOr<bool>(ArraySlice<int64>) or compatible.
+ // StatusOr<bool>(Span<int64>) or compatible.
template <typename FnType>
static Status ForEachIndexWithStatus(const Shape& shape,
- tensorflow::gtl::ArraySlice<int64> base,
- tensorflow::gtl::ArraySlice<int64> count,
- tensorflow::gtl::ArraySlice<int64> incr,
+ absl::Span<const int64> base,
+ absl::Span<const int64> count,
+ absl::Span<const int64> incr,
const FnType& visitor_function) {
return ForEachIndexInternal(shape, base, count, incr, visitor_function);
}
@@ -648,13 +645,12 @@ class ShapeUtil {
}
template <typename FnType>
- static void ForEachIndex(const Shape& shape,
- tensorflow::gtl::ArraySlice<int64> base,
- tensorflow::gtl::ArraySlice<int64> count,
- tensorflow::gtl::ArraySlice<int64> incr,
+ static void ForEachIndex(const Shape& shape, absl::Span<const int64> base,
+ absl::Span<const int64> count,
+ absl::Span<const int64> incr,
const FnType& visitor_function) {
ForEachIndexWithStatus(shape, base, count, incr,
- [&](tensorflow::gtl::ArraySlice<int64> indices) {
+ [&](absl::Span<const int64> indices) {
return StatusOr<bool>(visitor_function(indices));
})
.IgnoreError();
@@ -676,7 +672,7 @@ class ShapeUtil {
template <typename FnType>
static void ForEachIndex(const Shape& shape, const FnType& visitor_function) {
ForEachIndexWithStatus(shape,
- [&](tensorflow::gtl::ArraySlice<int64> indices) {
+ [&](absl::Span<const int64> indices) {
return StatusOr<bool>(visitor_function(indices));
})
.IgnoreError();
@@ -687,18 +683,18 @@ class ShapeUtil {
// matter.
//
// visitor_function must be a callable of type
- // void(ArraySlice<int64>) or compatible.
+ // void(Span<int64>) or compatible.
template <typename FnType>
static void ForEachIndexParallel(const Shape& shape,
- tensorflow::gtl::ArraySlice<int64> base,
- tensorflow::gtl::ArraySlice<int64> count,
- tensorflow::gtl::ArraySlice<int64> incr,
+ absl::Span<const int64> base,
+ absl::Span<const int64> count,
+ absl::Span<const int64> incr,
const FnType& visitor_function) {
// The parallel version of ForEachIndexInternal can never fail.
CHECK(ForEachIndexInternal(
shape, base, count, incr,
- [&visitor_function](tensorflow::gtl::ArraySlice<int64> indexes)
- -> StatusOr<bool> {
+ [&visitor_function](
+ absl::Span<const int64> indexes) -> StatusOr<bool> {
visitor_function(indexes);
return true;
},
@@ -720,9 +716,9 @@ class ShapeUtil {
template <typename FnType>
static Status ForEachIndexInternal(const Shape& shape,
- tensorflow::gtl::ArraySlice<int64> base,
- tensorflow::gtl::ArraySlice<int64> count,
- tensorflow::gtl::ArraySlice<int64> incr,
+ absl::Span<const int64> base,
+ absl::Span<const int64> count,
+ absl::Span<const int64> incr,
const FnType& visitor_function,
bool parallel = false) {
if (ShapeUtil::IsZeroElementArray(shape)) {
diff --git a/tensorflow/compiler/xla/shape_util_test.cc b/tensorflow/compiler/xla/shape_util_test.cc
index 7549ba9c78..6ca4085aaf 100644
--- a/tensorflow/compiler/xla/shape_util_test.cc
+++ b/tensorflow/compiler/xla/shape_util_test.cc
@@ -705,11 +705,10 @@ TEST(ShapeUtilTest, ForEachIndex) {
Shape shape = ShapeUtil::MakeShape(F32, data.dimensions);
// Increments at every invocation.
int invocations = 0;
- auto increment_func =
- [&invocations](tensorflow::gtl::ArraySlice<int64> indexes) {
- invocations++;
- return true;
- };
+ auto increment_func = [&invocations](absl::Span<const int64> indexes) {
+ invocations++;
+ return true;
+ };
std::vector<int64> zero_base(data.dimensions.size(), 0);
std::vector<int64> step(data.dimensions.size(), 1);
@@ -726,8 +725,7 @@ TEST(ShapeUtilTest, ForEachIndexWithStatus) {
// Increments at every invocation.
int invocations = 0;
auto increment_func =
- [&invocations](
- tensorflow::gtl::ArraySlice<int64> indexes) -> StatusOr<bool> {
+ [&invocations](absl::Span<const int64> indexes) -> StatusOr<bool> {
if (++invocations == 5) {
return Unimplemented("Cannot increment beyond 5.");
}
@@ -748,7 +746,7 @@ TEST(ShapeUtilTest, ForEachIndexParallel) {
Shape shape = ShapeUtil::MakeShape(F32, {10, 10});
int64 output[10][10];
int init = 5;
- auto set_func = [&](tensorflow::gtl::ArraySlice<int64> indexes) {
+ auto set_func = [&](absl::Span<const int64> indexes) {
output[indexes[0]][indexes[1]] = init + indexes[0] + indexes[1];
};
diff --git a/tensorflow/compiler/xla/sparse_index_array.cc b/tensorflow/compiler/xla/sparse_index_array.cc
index 31844abd89..1c135dda86 100644
--- a/tensorflow/compiler/xla/sparse_index_array.cc
+++ b/tensorflow/compiler/xla/sparse_index_array.cc
@@ -33,7 +33,7 @@ SparseIndexArray::SparseIndexArray(int64 max_indices, int64 rank,
}
SparseIndexArray::SparseIndexArray(int64 max_indices, int64 rank,
- tensorflow::gtl::ArraySlice<int64> indices)
+ absl::Span<const int64> indices)
: SparseIndexArray(max_indices, rank,
std::vector<int64>(indices.begin(), indices.end())) {}
@@ -48,25 +48,24 @@ int64 SparseIndexArray::index_count() const {
return indices_.size() / rank_;
}
-tensorflow::gtl::ArraySlice<int64> SparseIndexArray::At(
+absl::Span<const int64> SparseIndexArray::At(
int64 sparse_element_number) const {
CHECK_GT(rank_, 0);
CHECK_GE(sparse_element_number, 0);
CHECK_LE(rank_ * sparse_element_number + rank_, indices_.size());
- return tensorflow::gtl::ArraySlice<int64>(
+ return absl::Span<const int64>(
indices_.data() + rank_ * sparse_element_number, rank_);
}
-tensorflow::gtl::MutableArraySlice<int64> SparseIndexArray::At(
- int64 sparse_element_number) {
+absl::Span<int64> SparseIndexArray::At(int64 sparse_element_number) {
CHECK_GT(rank_, 0);
CHECK_GE(sparse_element_number, 0);
CHECK_LE(rank_ * sparse_element_number + rank_, indices_.size());
- return tensorflow::gtl::MutableArraySlice<int64>(
- indices_.data() + rank_ * sparse_element_number, rank_);
+ return absl::Span<int64>(indices_.data() + rank_ * sparse_element_number,
+ rank_);
}
-void SparseIndexArray::Append(tensorflow::gtl::ArraySlice<int64> index) {
+void SparseIndexArray::Append(absl::Span<const int64> index) {
CHECK_GT(rank_, 0);
CHECK_EQ(index.size(), rank_);
indices_.insert(indices_.end(), index.begin(), index.end());
@@ -90,12 +89,12 @@ bool SparseIndexArray::Validate(const Shape& shape) const {
if (num_indices < 2) {
return true;
}
- tensorflow::gtl::ArraySlice<int64> last = At(0);
+ absl::Span<const int64> last = At(0);
if (!IndexUtil::IndexInBounds(shape, last)) {
return false;
}
for (int64 n = 1; n < num_indices; ++n) {
- tensorflow::gtl::ArraySlice<int64> next = At(n);
+ absl::Span<const int64> next = At(n);
if (!IndexUtil::IndexInBounds(shape, next)) {
return false;
}
diff --git a/tensorflow/compiler/xla/sparse_index_array.h b/tensorflow/compiler/xla/sparse_index_array.h
index 7291705b61..6c70fd0a06 100644
--- a/tensorflow/compiler/xla/sparse_index_array.h
+++ b/tensorflow/compiler/xla/sparse_index_array.h
@@ -65,7 +65,7 @@ class SparseIndexArray {
SparseIndexArray(int64 max_indices, int64 rank,
std::vector<int64> indices = {});
SparseIndexArray(int64 max_indices, int64 rank,
- tensorflow::gtl::ArraySlice<int64> indices);
+ absl::Span<const int64> indices);
// Returns the number of elements represented by the indices stored in the
// array.
@@ -73,12 +73,12 @@ class SparseIndexArray {
// Returns a slice that refers to the given sparse index number. The argument
// must be in the range [0, element_count()).
- tensorflow::gtl::ArraySlice<int64> At(int64 sparse_element_number) const;
- tensorflow::gtl::MutableArraySlice<int64> At(int64 sparse_element_number);
+ absl::Span<const int64> At(int64 sparse_element_number) const;
+ absl::Span<int64> At(int64 sparse_element_number);
// Adds the given index at the end of the array. The new size of the
// SparseIndexArray must not exceed `max_indices`.
- void Append(tensorflow::gtl::ArraySlice<int64> index);
+ void Append(absl::Span<const int64> index);
// Removes all indices from the array.
void Clear();
@@ -96,10 +96,8 @@ class SparseIndexArray {
int64 max_indices() const { return max_indices_; }
// Returns a pointer to the int64 array that holds the sparse indices.
- tensorflow::gtl::MutableArraySlice<int64> mutable_data() {
- return absl::MakeSpan(indices_);
- }
- tensorflow::gtl::ArraySlice<int64> data() const { return indices_; }
+ absl::Span<int64> mutable_data() { return absl::MakeSpan(indices_); }
+ absl::Span<const int64> data() const { return indices_; }
// Sorts this sparse index array along with the set of corresponding values.
// The indices and values are sorted in the lexicographic order of the
@@ -117,7 +115,7 @@ class SparseIndexArray {
// std::cout << v[0] << ", " << v[1] << ", " << v[2] << std::endl;
//
template <typename NativeT>
- void SortWithValues(tensorflow::gtl::MutableArraySlice<NativeT> values);
+ void SortWithValues(absl::Span<NativeT> values);
private:
std::vector<int64> indices_;
@@ -126,8 +124,7 @@ class SparseIndexArray {
};
template <typename NativeT>
-void SparseIndexArray::SortWithValues(
- tensorflow::gtl::MutableArraySlice<NativeT> values) {
+void SparseIndexArray::SortWithValues(absl::Span<NativeT> values) {
int64 num_elements = index_count();
CHECK_EQ(values.size(), num_elements);
std::vector<int64> sort_order;
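
Call sites that used to spell out MutableArraySlice typically switch to absl::MakeSpan, which deduces a writable span from a container, while read-only spans convert implicitly. A minimal usage sketch (the float values are illustrative) of the two span flavors a caller of something like SortWithValues needs:

#include <vector>
#include "absl/types/span.h"

int main() {
  std::vector<float> values = {3.0f, 1.0f, 2.0f};

  // Read-only views convert implicitly from containers...
  absl::Span<const float> read_only = values;
  // ...while writable views are requested explicitly with MakeSpan.
  absl::Span<float> writable = absl::MakeSpan(values);

  writable[0] = 0.0f;                    // mutates values[0]
  return read_only[0] == 0.0f ? 0 : 1;   // both views alias the same storage
}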
diff --git a/tensorflow/compiler/xla/tests/array_elementwise_ops_test.cc b/tensorflow/compiler/xla/tests/array_elementwise_ops_test.cc
index 577fd1ab3b..55dcf2817b 100644
--- a/tensorflow/compiler/xla/tests/array_elementwise_ops_test.cc
+++ b/tensorflow/compiler/xla/tests/array_elementwise_ops_test.cc
@@ -41,7 +41,6 @@ limitations under the License.
namespace xla {
namespace {
-using tensorflow::gtl::ArraySlice;
class ArrayElementwiseOpTest : public ClientLibraryTestBase {
public:
@@ -433,8 +432,9 @@ XLA_TEST_F(ArrayElementwiseOpTest, DivTwoConstantZeroElementF32s) {
class IntegerDivideOpTest : public ArrayElementwiseOpTest {
protected:
template <typename T>
- void TestDivRem(ArraySlice<T> dividends, ArraySlice<T> divisors,
- ArraySlice<T> quotients, ArraySlice<T> remainders) {
+ void TestDivRem(absl::Span<const T> dividends, absl::Span<const T> divisors,
+ absl::Span<const T> quotients,
+ absl::Span<const T> remainders) {
{
XlaBuilder builder(TestName());
XlaOp dividend;
diff --git a/tensorflow/compiler/xla/tests/broadcast_simple_test.cc b/tensorflow/compiler/xla/tests/broadcast_simple_test.cc
index 1d28e85b16..fe4267c73b 100644
--- a/tensorflow/compiler/xla/tests/broadcast_simple_test.cc
+++ b/tensorflow/compiler/xla/tests/broadcast_simple_test.cc
@@ -53,10 +53,11 @@ class BroadcastSimpleTest : public ClientLibraryTestBase {
}
}
- std::unique_ptr<GlobalData> MakeR3Data(
- tensorflow::gtl::ArraySlice<int64> bounds,
- tensorflow::gtl::ArraySlice<int64> minor_to_major, Shape* r3_shape,
- Array3D<float>* r3_array, float start, float end, int seed) {
+ std::unique_ptr<GlobalData> MakeR3Data(absl::Span<const int64> bounds,
+ absl::Span<const int64> minor_to_major,
+ Shape* r3_shape,
+ Array3D<float>* r3_array, float start,
+ float end, int seed) {
*r3_shape = ShapeUtil::MakeShapeWithLayout(F32, bounds, minor_to_major);
r3_array->FillRandom(start, end, seed);
auto r3_data = LiteralUtil::CreateR3FromArray3D(*r3_array)->Relayout(
@@ -66,10 +67,11 @@ class BroadcastSimpleTest : public ClientLibraryTestBase {
return r3_global_data;
}
- std::unique_ptr<GlobalData> MakeR2Data(
- tensorflow::gtl::ArraySlice<int64> bounds,
- tensorflow::gtl::ArraySlice<int64> minor_to_major, Shape* r2_shape,
- Array2D<float>* r2_array, float start, float end, int seed) {
+ std::unique_ptr<GlobalData> MakeR2Data(absl::Span<const int64> bounds,
+ absl::Span<const int64> minor_to_major,
+ Shape* r2_shape,
+ Array2D<float>* r2_array, float start,
+ float end, int seed) {
*r2_shape = ShapeUtil::MakeShapeWithLayout(F32, bounds, minor_to_major);
r2_array->FillRandom(start, end, seed);
auto r2_data = LiteralUtil::CreateR2FromArray2D(*r2_array)->Relayout(
@@ -348,7 +350,7 @@ XLA_TEST_P(BroadcastR3ImplicitTest, Doit) {
Array3D<float> expected_array(spec.output_bounds[0], spec.output_bounds[1],
spec.output_bounds[2]);
- auto Each = ([&](tensorflow::gtl::ArraySlice<int64> indices, float* value) {
+ auto Each = ([&](absl::Span<const int64> indices, float* value) {
float r3_implicit = r3_implicit_array(indices[0] % spec.input_bounds[0],
indices[1] % spec.input_bounds[1],
indices[2] % spec.input_bounds[2]);
diff --git a/tensorflow/compiler/xla/tests/client_library_test_base.cc b/tensorflow/compiler/xla/tests/client_library_test_base.cc
index f1ab83df82..8a236db0ff 100644
--- a/tensorflow/compiler/xla/tests/client_library_test_base.cc
+++ b/tensorflow/compiler/xla/tests/client_library_test_base.cc
@@ -95,15 +95,14 @@ string ClientLibraryTestBase::TestName() const {
}
StatusOr<std::unique_ptr<GlobalData>> ClientLibraryTestBase::Execute(
- XlaBuilder* builder, tensorflow::gtl::ArraySlice<GlobalData*> arguments) {
+ XlaBuilder* builder, absl::Span<GlobalData* const> arguments) {
// Build the computation, as a convenience.
TF_ASSIGN_OR_RETURN(auto computation, builder->Build());
return client_->Execute(computation, arguments, &execution_options_);
}
StatusOr<std::unique_ptr<Literal>> ClientLibraryTestBase::ExecuteAndTransfer(
- const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments,
+ const XlaComputation& computation, absl::Span<GlobalData* const> arguments,
const Shape* shape_with_output_layout) {
ExecutionOptions execution_options = execution_options_;
if (shape_with_output_layout != nullptr) {
@@ -115,7 +114,7 @@ StatusOr<std::unique_ptr<Literal>> ClientLibraryTestBase::ExecuteAndTransfer(
}
StatusOr<std::unique_ptr<Literal>> ClientLibraryTestBase::ExecuteAndTransfer(
- XlaBuilder* builder, tensorflow::gtl::ArraySlice<GlobalData*> arguments,
+ XlaBuilder* builder, absl::Span<GlobalData* const> arguments,
const Shape* shape_with_output_layout) {
// Build the computation, as a convenience.
TF_ASSIGN_OR_RETURN(auto computation, builder->Build());
@@ -124,8 +123,7 @@ StatusOr<std::unique_ptr<Literal>> ClientLibraryTestBase::ExecuteAndTransfer(
StatusOr<std::unique_ptr<Literal>>
ClientLibraryTestBase::ExecuteAndTransferReference(
- const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments,
+ const XlaComputation& computation, absl::Span<GlobalData* const> arguments,
const Shape* shape_with_output_layout) {
ExecutionOptions execution_options = execution_options_;
if (shape_with_output_layout != nullptr) {
@@ -138,7 +136,7 @@ ClientLibraryTestBase::ExecuteAndTransferReference(
}
string ClientLibraryTestBase::ExecuteToString(
- XlaBuilder* builder, tensorflow::gtl::ArraySlice<GlobalData*> arguments) {
+ XlaBuilder* builder, absl::Span<GlobalData* const> arguments) {
auto computation_status = builder->Build();
if (!computation_status.ok()) {
return computation_status.status().ToString();
@@ -156,7 +154,7 @@ string ClientLibraryTestBase::ExecuteToString(
void ClientLibraryTestBase::ComputeAndCompareR1(
XlaBuilder* builder, const tensorflow::core::Bitmap& expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments) {
+ absl::Span<GlobalData* const> arguments) {
std::unique_ptr<Literal> expected_literal = LiteralUtil::CreateR1(expected);
ClientLibraryTestBase::ComputeAndCompareLiteral(builder, *expected_literal,
arguments);
@@ -164,15 +162,14 @@ void ClientLibraryTestBase::ComputeAndCompareR1(
void ClientLibraryTestBase::ComputeAndCompareLiteral(
XlaBuilder* builder, const Literal& expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments,
- const Shape* shape_with_layout) {
+ absl::Span<GlobalData* const> arguments, const Shape* shape_with_layout) {
EXPECT_IS_OK(ComputeAndCompareLiteralWithStatus(builder, expected, arguments,
shape_with_layout));
}
void ClientLibraryTestBase::ComputeAndCompareLiteral(
XlaBuilder* builder, const Literal& expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments, ErrorSpec error,
+ absl::Span<GlobalData* const> arguments, ErrorSpec error,
const Shape* shape_with_layout) {
EXPECT_IS_OK(ComputeAndCompareLiteralWithStatus(builder, expected, arguments,
error, shape_with_layout));
@@ -180,7 +177,7 @@ void ClientLibraryTestBase::ComputeAndCompareLiteral(
Status ClientLibraryTestBase::ComputeAndCompareLiteralWithAllOutputLayouts(
const xla::XlaComputation& computation, const Literal& expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments,
+ absl::Span<GlobalData* const> arguments,
const std::function<void(const Literal& actual,
const string& error_message)>& verify_output) {
// Try with no layout requirement.
@@ -205,7 +202,7 @@ Status ClientLibraryTestBase::ComputeAndCompareLiteralWithAllOutputLayouts(
Status ClientLibraryTestBase::ComputeAndCompareLiteralWithAllInputLayouts(
const xla::XlaComputation& computation, const Literal& /*expected*/,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments,
+ absl::Span<GlobalData* const> arguments,
const std::function<void(const Literal& actual,
const string& error_message)>& verify_output,
const Shape* output_with_layout) {
@@ -252,10 +249,9 @@ Status ClientLibraryTestBase::ComputeAndCompareLiteralWithAllInputLayouts(
// Every argument has an assigned layout.
TF_ASSIGN_OR_RETURN(
auto actual,
- ExecuteAndTransfer(
- computation,
- tensorflow::gtl::ArraySlice<GlobalData*>(arguments_with_layout),
- output_with_layout));
+ ExecuteAndTransfer(computation,
+ absl::Span<GlobalData* const>(arguments_with_layout),
+ output_with_layout));
string error_message = "Test with input layouts: ";
for (const auto& str : layout_strings) {
absl::StrAppend(&error_message, str, " ");
@@ -269,7 +265,7 @@ Status ClientLibraryTestBase::ComputeAndCompareLiteralWithAllInputLayouts(
Status ClientLibraryTestBase::ComputeAndCompareLiteralWithStatus(
XlaBuilder* builder, const Literal& expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments_passed_in,
+ absl::Span<GlobalData* const> arguments_passed_in,
const Shape* shape_with_layout) {
std::vector<GlobalData*> arguments(arguments_passed_in.begin(),
arguments_passed_in.end());
@@ -329,8 +325,8 @@ Status ClientLibraryTestBase::ComputeAndCompareLiteralWithStatus(
Status ClientLibraryTestBase::ComputeAndCompareLiteralWithStatus(
XlaBuilder* builder, const Literal& expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments_passed_in,
- ErrorSpec error, const Shape* shape_with_layout) {
+ absl::Span<GlobalData* const> arguments_passed_in, ErrorSpec error,
+ const Shape* shape_with_layout) {
std::vector<GlobalData*> arguments(arguments_passed_in.begin(),
arguments_passed_in.end());
@@ -386,7 +382,7 @@ Status ClientLibraryTestBase::ComputeAndCompareLiteralWithStatus(
void ClientLibraryTestBase::ComputeAndCompareR1U8(
XlaBuilder* builder, absl::string_view expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments) {
+ absl::Span<GlobalData* const> arguments) {
auto actual_status = ExecuteAndTransfer(builder, arguments);
EXPECT_IS_OK(actual_status.status());
if (!actual_status.ok()) {
@@ -405,7 +401,7 @@ void ClientLibraryTestBase::ComputeAndCompareR1U8(
void ClientLibraryTestBase::ComputeAndCompareTuple(
XlaBuilder* builder, const Literal& expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments) {
+ absl::Span<GlobalData* const> arguments) {
auto actual_status = ExecuteAndTransfer(builder, arguments);
EXPECT_IS_OK(actual_status.status());
if (!actual_status.ok()) {
@@ -417,7 +413,7 @@ void ClientLibraryTestBase::ComputeAndCompareTuple(
void ClientLibraryTestBase::ComputeAndCompareTuple(
XlaBuilder* builder, const Literal& expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments, ErrorSpec error) {
+ absl::Span<GlobalData* const> arguments, ErrorSpec error) {
auto actual_status = ExecuteAndTransfer(builder, arguments);
EXPECT_IS_OK(actual_status.status());
if (!actual_status.ok()) {
@@ -428,7 +424,7 @@ void ClientLibraryTestBase::ComputeAndCompareTuple(
}
void ClientLibraryTestBase::ComputeAndCompare(
- XlaBuilder* builder, tensorflow::gtl::ArraySlice<Literal> arguments) {
+ XlaBuilder* builder, absl::Span<const Literal> arguments) {
auto status_or_data = ComputeValueAndReference(builder, arguments);
EXPECT_IS_OK(status_or_data);
if (!status_or_data.ok()) {
@@ -440,8 +436,7 @@ void ClientLibraryTestBase::ComputeAndCompare(
}
void ClientLibraryTestBase::ComputeAndCompare(
- XlaBuilder* builder, tensorflow::gtl::ArraySlice<Literal> arguments,
- ErrorSpec error) {
+ XlaBuilder* builder, absl::Span<const Literal> arguments, ErrorSpec error) {
auto status_or_data = ComputeValueAndReference(builder, arguments);
EXPECT_IS_OK(status_or_data);
if (!status_or_data.ok()) {
@@ -454,7 +449,7 @@ void ClientLibraryTestBase::ComputeAndCompare(
StatusOr<std::pair<std::unique_ptr<Literal>, std::unique_ptr<Literal>>>
ClientLibraryTestBase::ComputeValueAndReference(
- XlaBuilder* builder, tensorflow::gtl::ArraySlice<Literal> arguments) {
+ XlaBuilder* builder, absl::Span<const Literal> arguments) {
// Transfer the arguments to the executor service. We put the unique_ptr's
// into a vector to keep the data alive on the service until the end of this
// function.
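
For slices of pointers, note the element type in the replacement: absl::Span<GlobalData* const> makes the pointers immutable through the view while the pointed-to objects stay mutable, matching the old ArraySlice<GlobalData*> semantics. A standalone sketch with a stand-in type (Widget and BumpAll are illustrative):

#include <vector>
#include "absl/types/span.h"

struct Widget {
  int value = 0;
};

// The span's elements (the pointers) are const, so the function cannot reseat
// them, but it can still mutate the Widgets they point to.
void BumpAll(absl::Span<Widget* const> widgets) {
  for (Widget* w : widgets) {
    ++w->value;
  }
  // widgets[0] = nullptr;  // would not compile: element type is Widget* const
}

int main() {
  Widget a, b;
  std::vector<Widget*> ptrs = {&a, &b};
  BumpAll(ptrs);  // vector<Widget*> converts implicitly to Span<Widget* const>
  return (a.value == 1 && b.value == 1) ? 0 : 1;
}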
diff --git a/tensorflow/compiler/xla/tests/client_library_test_base.h b/tensorflow/compiler/xla/tests/client_library_test_base.h
index ac96d3e325..77954dd7f7 100644
--- a/tensorflow/compiler/xla/tests/client_library_test_base.h
+++ b/tensorflow/compiler/xla/tests/client_library_test_base.h
@@ -49,8 +49,8 @@ namespace xla {
// use_bfloat16_params with that value. Returns the result.
template <typename TestCase>
std::vector<TestCase> ExpandUseBfloat16(
- tensorflow::gtl::ArraySlice<bool> use_bfloat16_params,
- tensorflow::gtl::ArraySlice<TestCase> specs) {
+ absl::Span<const bool> use_bfloat16_params,
+ absl::Span<const TestCase> specs) {
std::vector<TestCase> expanded;
for (bool use_bfloat16 : use_bfloat16_params) {
for (const auto& spec : specs) {
@@ -93,15 +93,15 @@ class ClientLibraryTestBase : public ::testing::Test {
// execution options. Modify execution_options_ in your test if you want to
// customize the options.
StatusOr<std::unique_ptr<GlobalData>> Execute(
- XlaBuilder* builder, tensorflow::gtl::ArraySlice<GlobalData*> arguments);
+ XlaBuilder* builder, absl::Span<GlobalData* const> arguments);
StatusOr<std::unique_ptr<Literal>> ExecuteAndTransfer(
- XlaBuilder* builder, tensorflow::gtl::ArraySlice<GlobalData*> arguments,
+ XlaBuilder* builder, absl::Span<GlobalData* const> arguments,
const Shape* shape_with_output_layout = nullptr);
StatusOr<std::unique_ptr<Literal>> ExecuteAndTransfer(
const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments,
+ absl::Span<GlobalData* const> arguments,
const Shape* shape_with_output_layout = nullptr);
// This executes the computation via the reference client (which connects a
@@ -109,13 +109,13 @@ class ClientLibraryTestBase : public ::testing::Test {
// computation.
StatusOr<std::unique_ptr<Literal>> ExecuteAndTransferReference(
const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments,
+ absl::Span<GlobalData* const> arguments,
const Shape* shape_with_output_layout = nullptr);
// Run a computation and return its value as a string. If an error
// occurs, then instead return the error as a string.
string ExecuteToString(XlaBuilder* builder,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments);
+ absl::Span<GlobalData* const> arguments);
// Convenience methods for building and running a computation, transferring
// the result, and comparing it to the expected value(s). Methods are
@@ -125,102 +125,98 @@ class ClientLibraryTestBase : public ::testing::Test {
// for integral types without the ErrorSpec parameter.
template <typename NativeT>
void ComputeAndCompareR0(XlaBuilder* builder, NativeT expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments);
+ absl::Span<GlobalData* const> arguments);
template <typename NativeT>
void ComputeAndCompareR0(XlaBuilder* builder, NativeT expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments,
+ absl::Span<GlobalData* const> arguments,
ErrorSpec error);
template <typename NativeT>
void ComputeAndCompareR1(XlaBuilder* builder,
- tensorflow::gtl::ArraySlice<NativeT> expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments);
+ absl::Span<const NativeT> expected,
+ absl::Span<GlobalData* const> arguments);
template <typename NativeT>
void ComputeAndCompareR1(XlaBuilder* builder,
- tensorflow::gtl::ArraySlice<NativeT> expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments,
+ absl::Span<const NativeT> expected,
+ absl::Span<GlobalData* const> arguments,
ErrorSpec error);
// As above, but uses a bitmap to hold the predicate vector to avoid
// deficiencies of vector<bool>.
void ComputeAndCompareR1(XlaBuilder* builder,
const tensorflow::core::Bitmap& expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments);
+ absl::Span<GlobalData* const> arguments);
template <typename NativeT>
void ComputeAndCompareR2(XlaBuilder* builder,
const Array2D<NativeT>& expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments);
+ absl::Span<GlobalData* const> arguments);
template <typename NativeT>
void ComputeAndCompareR2(XlaBuilder* builder,
const Array2D<NativeT>& expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments,
+ absl::Span<GlobalData* const> arguments,
ErrorSpec error);
template <typename NativeT>
void ComputeAndCompareR3(XlaBuilder* builder,
const Array3D<NativeT>& expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments);
+ absl::Span<GlobalData* const> arguments);
template <typename NativeT>
void ComputeAndCompareR3(XlaBuilder* builder,
const Array3D<NativeT>& expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments,
+ absl::Span<GlobalData* const> arguments,
ErrorSpec error);
template <typename NativeT>
void ComputeAndCompareR4(XlaBuilder* builder,
const Array4D<NativeT>& expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments);
+ absl::Span<GlobalData* const> arguments);
template <typename NativeT>
void ComputeAndCompareR4(XlaBuilder* builder,
const Array4D<NativeT>& expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments,
+ absl::Span<GlobalData* const> arguments,
ErrorSpec error);
// Build and run the computation and compare the result with the given
// literal. shape_with_layout indicates the result layout to request when
// calling Execute.
- void ComputeAndCompareLiteral(
- XlaBuilder* builder, const Literal& expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments,
- const Shape* shape_with_layout = nullptr);
- void ComputeAndCompareLiteral(
- XlaBuilder* builder, const Literal& expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments, ErrorSpec error,
- const Shape* shape_with_layout = nullptr);
+ void ComputeAndCompareLiteral(XlaBuilder* builder, const Literal& expected,
+ absl::Span<GlobalData* const> arguments,
+ const Shape* shape_with_layout = nullptr);
+ void ComputeAndCompareLiteral(XlaBuilder* builder, const Literal& expected,
+ absl::Span<GlobalData* const> arguments,
+ ErrorSpec error,
+ const Shape* shape_with_layout = nullptr);
// ComputeAndCompare variant which returns an error status.
Status ComputeAndCompareLiteralWithStatus(
XlaBuilder* builder, const Literal& expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments,
+ absl::Span<GlobalData* const> arguments,
const Shape* shape_with_layout = nullptr);
Status ComputeAndCompareLiteralWithStatus(
XlaBuilder* builder, const Literal& expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments, ErrorSpec error,
+ absl::Span<GlobalData* const> arguments, ErrorSpec error,
const Shape* shape_with_layout = nullptr);
  // Compare the result of the computation to a string. In XLA strings are
// represented using rank-1 U8 shapes.
- void ComputeAndCompareR1U8(
- XlaBuilder* builder, absl::string_view expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments);
+ void ComputeAndCompareR1U8(XlaBuilder* builder, absl::string_view expected,
+ absl::Span<GlobalData* const> arguments);
// Convenience method for running a built computation, transferring the
// result, and comparing it to the expected tuple literal.
- void ComputeAndCompareTuple(
- XlaBuilder* builder, const Literal& expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments);
- void ComputeAndCompareTuple(
- XlaBuilder* builder, const Literal& expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments, ErrorSpec error);
+ void ComputeAndCompareTuple(XlaBuilder* builder, const Literal& expected,
+ absl::Span<GlobalData* const> arguments);
+ void ComputeAndCompareTuple(XlaBuilder* builder, const Literal& expected,
+ absl::Span<GlobalData* const> arguments,
+ ErrorSpec error);
// Convenience method for running a built computation and comparing the result
// with the reference result.
void ComputeAndCompare(XlaBuilder* builder,
- tensorflow::gtl::ArraySlice<Literal> arguments);
+ absl::Span<const Literal> arguments);
void ComputeAndCompare(XlaBuilder* builder,
- tensorflow::gtl::ArraySlice<Literal> arguments,
- ErrorSpec error);
+ absl::Span<const Literal> arguments, ErrorSpec error);
// Create scalar operations for use in reductions.
XlaComputation CreateScalarRelu();
@@ -337,7 +333,7 @@ class ClientLibraryTestBase : public ::testing::Test {
// converted to bfloat16.
template <typename NativeT>
std::unique_ptr<GlobalData> CreateR1Parameter(
- tensorflow::gtl::ArraySlice<NativeT> values, int64 parameter_number,
+ absl::Span<const NativeT> values, int64 parameter_number,
const string& name, XlaBuilder* builder, XlaOp* data_handle);
// Creates a parameter instruction that wraps the given constant array
@@ -381,7 +377,7 @@ class ClientLibraryTestBase : public ::testing::Test {
// actual).
StatusOr<std::pair<std::unique_ptr<Literal>, std::unique_ptr<Literal>>>
ComputeValueAndReference(XlaBuilder* builder,
- tensorflow::gtl::ArraySlice<Literal> arguments);
+ absl::Span<const Literal> arguments);
Client* client_;
Client* ref_client_; // To compute reference result.
@@ -390,12 +386,12 @@ class ClientLibraryTestBase : public ::testing::Test {
private:
Status ComputeAndCompareLiteralWithAllOutputLayouts(
const xla::XlaComputation& computation, const Literal& expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments,
+ absl::Span<GlobalData* const> arguments,
const std::function<void(const Literal& actual,
const string& error_message)>& verify_output);
Status ComputeAndCompareLiteralWithAllInputLayouts(
const xla::XlaComputation& computation, const Literal& expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments,
+ absl::Span<GlobalData* const> arguments,
const std::function<void(const Literal& actual,
const string& error_message)>& verify_output,
const Shape* output_with_layout = nullptr);
@@ -415,7 +411,7 @@ class ClientLibraryTestBase : public ::testing::Test {
template <typename NativeT>
void ClientLibraryTestBase::ComputeAndCompareR0(
XlaBuilder* builder, NativeT expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments) {
+ absl::Span<GlobalData* const> arguments) {
std::unique_ptr<Literal> expected_literal =
LiteralUtil::CreateR0<NativeT>(expected);
ClientLibraryTestBase::ComputeAndCompareLiteral(builder, *expected_literal,
@@ -425,7 +421,7 @@ void ClientLibraryTestBase::ComputeAndCompareR0(
template <typename NativeT>
void ClientLibraryTestBase::ComputeAndCompareR0(
XlaBuilder* builder, NativeT expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments, ErrorSpec error) {
+ absl::Span<GlobalData* const> arguments, ErrorSpec error) {
static_assert(std::is_same<NativeT, float>::value ||
std::is_same<NativeT, double>::value ||
std::is_same<NativeT, bfloat16>::value ||
@@ -440,8 +436,8 @@ void ClientLibraryTestBase::ComputeAndCompareR0(
template <typename NativeT>
void ClientLibraryTestBase::ComputeAndCompareR1(
- XlaBuilder* builder, tensorflow::gtl::ArraySlice<NativeT> expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments) {
+ XlaBuilder* builder, absl::Span<const NativeT> expected,
+ absl::Span<GlobalData* const> arguments) {
std::unique_ptr<Literal> expected_literal =
LiteralUtil::CreateR1<NativeT>(expected);
ClientLibraryTestBase::ComputeAndCompareLiteral(builder, *expected_literal,
@@ -450,8 +446,8 @@ void ClientLibraryTestBase::ComputeAndCompareR1(
template <typename NativeT>
void ClientLibraryTestBase::ComputeAndCompareR1(
- XlaBuilder* builder, tensorflow::gtl::ArraySlice<NativeT> expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments, ErrorSpec error) {
+ XlaBuilder* builder, absl::Span<const NativeT> expected,
+ absl::Span<GlobalData* const> arguments, ErrorSpec error) {
static_assert(std::is_same<NativeT, float>::value ||
std::is_same<NativeT, double>::value ||
std::is_same<NativeT, bfloat16>::value ||
@@ -467,7 +463,7 @@ void ClientLibraryTestBase::ComputeAndCompareR1(
template <typename NativeT>
void ClientLibraryTestBase::ComputeAndCompareR2(
XlaBuilder* builder, const Array2D<NativeT>& expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments) {
+ absl::Span<GlobalData* const> arguments) {
std::unique_ptr<Literal> expected_literal =
LiteralUtil::CreateR2FromArray2D<NativeT>(expected);
ClientLibraryTestBase::ComputeAndCompareLiteral(builder, *expected_literal,
@@ -477,7 +473,7 @@ void ClientLibraryTestBase::ComputeAndCompareR2(
template <typename NativeT>
void ClientLibraryTestBase::ComputeAndCompareR2(
XlaBuilder* builder, const Array2D<NativeT>& expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments, ErrorSpec error) {
+ absl::Span<GlobalData* const> arguments, ErrorSpec error) {
static_assert(std::is_same<NativeT, float>::value ||
std::is_same<NativeT, double>::value ||
std::is_same<NativeT, bfloat16>::value ||
@@ -493,7 +489,7 @@ void ClientLibraryTestBase::ComputeAndCompareR2(
template <typename NativeT>
void ClientLibraryTestBase::ComputeAndCompareR3(
XlaBuilder* builder, const Array3D<NativeT>& expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments) {
+ absl::Span<GlobalData* const> arguments) {
std::unique_ptr<Literal> expected_literal =
LiteralUtil::CreateR3FromArray3D<NativeT>(expected);
ClientLibraryTestBase::ComputeAndCompareLiteral(builder, *expected_literal,
@@ -503,7 +499,7 @@ void ClientLibraryTestBase::ComputeAndCompareR3(
template <typename NativeT>
void ClientLibraryTestBase::ComputeAndCompareR3(
XlaBuilder* builder, const Array3D<NativeT>& expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments, ErrorSpec error) {
+ absl::Span<GlobalData* const> arguments, ErrorSpec error) {
static_assert(std::is_same<NativeT, float>::value ||
std::is_same<NativeT, double>::value ||
std::is_same<NativeT, bfloat16>::value ||
@@ -519,7 +515,7 @@ void ClientLibraryTestBase::ComputeAndCompareR3(
template <typename NativeT>
void ClientLibraryTestBase::ComputeAndCompareR4(
XlaBuilder* builder, const Array4D<NativeT>& expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments) {
+ absl::Span<GlobalData* const> arguments) {
std::unique_ptr<Literal> expected_literal =
LiteralUtil::CreateR4FromArray4D<NativeT>(expected);
ClientLibraryTestBase::ComputeAndCompareLiteral(builder, *expected_literal,
@@ -529,7 +525,7 @@ void ClientLibraryTestBase::ComputeAndCompareR4(
template <typename NativeT>
void ClientLibraryTestBase::ComputeAndCompareR4(
XlaBuilder* builder, const Array4D<NativeT>& expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments, ErrorSpec error) {
+ absl::Span<GlobalData* const> arguments, ErrorSpec error) {
static_assert(std::is_same<NativeT, float>::value ||
std::is_same<NativeT, double>::value ||
std::is_same<NativeT, bfloat16>::value ||
@@ -558,7 +554,7 @@ std::unique_ptr<GlobalData> ClientLibraryTestBase::CreateR0Parameter(
template <typename NativeT>
std::unique_ptr<GlobalData> ClientLibraryTestBase::CreateR1Parameter(
- tensorflow::gtl::ArraySlice<NativeT> values, int64 parameter_number,
+ absl::Span<const NativeT> values, int64 parameter_number,
const string& name, XlaBuilder* builder, XlaOp* data_handle) {
std::unique_ptr<Literal> literal = LiteralUtil::CreateR1(values);
if (use_bfloat16_ && literal->shape().element_type() == F32) {
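
Test call sites keep passing vectors, C arrays, and braced lists directly; absl::Span<const T>, like ArraySlice, binds to all of them without copying. A standalone sketch of the accepted argument forms (PrintAll is illustrative):

#include <array>
#include <iostream>
#include <vector>
#include "absl/types/span.h"

void PrintAll(absl::Span<const int> xs) {
  for (int x : xs) std::cout << x << " ";
  std::cout << "\n";
}

int main() {
  std::vector<int> v = {1, 2, 3};
  std::array<int, 2> a = {4, 5};
  int c[] = {6, 7, 8};

  PrintAll(v);        // from std::vector
  PrintAll(a);        // from std::array
  PrintAll(c);        // from a C array
  PrintAll({9, 10});  // from a braced initializer list (const spans only)
}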
diff --git a/tensorflow/compiler/xla/tests/compilation_cache_test.cc b/tensorflow/compiler/xla/tests/compilation_cache_test.cc
index 7c52c9fbbb..25f6bfd843 100644
--- a/tensorflow/compiler/xla/tests/compilation_cache_test.cc
+++ b/tensorflow/compiler/xla/tests/compilation_cache_test.cc
@@ -38,10 +38,9 @@ namespace {
class CompilationCacheTest : public ClientLibraryTestBase {
public:
- void ExecuteComputationR0F32(
- const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments, float expected_result,
- bool expect_cache_hit) {
+ void ExecuteComputationR0F32(const XlaComputation& computation,
+ absl::Span<GlobalData* const> arguments,
+ float expected_result, bool expect_cache_hit) {
ExecutionProfile execution_profile;
std::unique_ptr<Literal> result =
client_
@@ -56,7 +55,7 @@ class CompilationCacheTest : public ClientLibraryTestBase {
void ExecuteComputationR2F32(
const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments,
+ absl::Span<GlobalData* const> arguments,
std::initializer_list<std::initializer_list<float>> expected_result,
bool expect_cache_hit) {
ExecutionProfile execution_profile;
diff --git a/tensorflow/compiler/xla/tests/copy_test.cc b/tensorflow/compiler/xla/tests/copy_test.cc
index 50a9ebc1e9..526626c1dd 100644
--- a/tensorflow/compiler/xla/tests/copy_test.cc
+++ b/tensorflow/compiler/xla/tests/copy_test.cc
@@ -54,7 +54,7 @@ class CopyOpTest : public HloTestBase {
void TestCopyConstantLayout021(size_t n1, size_t n2, size_t n3);
void TestCopyConstantLayoutR4(size_t n1, size_t n2, size_t n3, size_t n4,
- tensorflow::gtl::ArraySlice<int64> permutation);
+ absl::Span<const int64> permutation);
};
XLA_TEST_F(CopyOpTest, CopyR0Bool) {
@@ -187,9 +187,9 @@ void CopyOpTest::TestCopyConstantLayout021(size_t n1, size_t n2, size_t n3) {
LiteralTestUtil::ExpectR3EqualArray3D(a, *result);
}
-void CopyOpTest::TestCopyConstantLayoutR4(
- size_t n1, size_t n2, size_t n3, size_t n4,
- tensorflow::gtl::ArraySlice<int64> permutation) {
+void CopyOpTest::TestCopyConstantLayoutR4(size_t n1, size_t n2, size_t n3,
+ size_t n4,
+ absl::Span<const int64> permutation) {
Array4D<int32> a(n1, n2, n3, n4);
for (size_t i = 0; i < n1; ++i) {
for (size_t j = 0; j < n2; ++j) {
diff --git a/tensorflow/compiler/xla/tests/deallocation_test.cc b/tensorflow/compiler/xla/tests/deallocation_test.cc
index 5f234f36a8..1ea9416881 100644
--- a/tensorflow/compiler/xla/tests/deallocation_test.cc
+++ b/tensorflow/compiler/xla/tests/deallocation_test.cc
@@ -36,7 +36,7 @@ class DeallocationTest : public ClientLibraryTestBase {
// Build and execute the given computation then verify the results can be
// transferred from the device successfully.
std::unique_ptr<GlobalData> ExecuteAndCheckTransfer(
- XlaBuilder* builder, tensorflow::gtl::ArraySlice<GlobalData*> arguments) {
+ XlaBuilder* builder, absl::Span<GlobalData* const> arguments) {
XlaComputation computation = builder->Build().ConsumeValueOrDie();
auto global_data =
client_->Execute(computation, arguments, &execution_options_)
diff --git a/tensorflow/compiler/xla/tests/deconstruct_tuple_test.cc b/tensorflow/compiler/xla/tests/deconstruct_tuple_test.cc
index 2db6503afa..e1435cf8ab 100644
--- a/tensorflow/compiler/xla/tests/deconstruct_tuple_test.cc
+++ b/tensorflow/compiler/xla/tests/deconstruct_tuple_test.cc
@@ -42,7 +42,7 @@ class DeconstructTupleTest : public ClientLibraryTestBase {
// Build and execute the given computation then verify the results can be
// transferred from the device successfully.
std::unique_ptr<GlobalData> ExecuteAndCheckTransfer(
- XlaBuilder* builder, tensorflow::gtl::ArraySlice<GlobalData*> arguments) {
+ XlaBuilder* builder, absl::Span<GlobalData* const> arguments) {
XlaComputation computation = builder->Build().ConsumeValueOrDie();
auto global_data =
client_->Execute(computation, arguments, &execution_options_)
diff --git a/tensorflow/compiler/xla/tests/dynamic_ops_test.cc b/tensorflow/compiler/xla/tests/dynamic_ops_test.cc
index 7f6f203a1b..9bf3767ca3 100644
--- a/tensorflow/compiler/xla/tests/dynamic_ops_test.cc
+++ b/tensorflow/compiler/xla/tests/dynamic_ops_test.cc
@@ -114,14 +114,14 @@ class DynamicSliceTest : public ClientLibraryTestBase {
}
template <typename IndexT, typename DataT>
- void RunR1(tensorflow::gtl::ArraySlice<int> input_values_int,
+ void RunR1(absl::Span<const int> input_values_int,
const std::vector<IndexT> slice_starts,
const std::vector<int64>& slice_sizes,
- tensorflow::gtl::ArraySlice<int> expected_values_int) {
+ absl::Span<const int> expected_values_int) {
// bfloat16 has explicit constructors, so it does not implicitly convert the
-    // way built-in types do, which is why we can't take the parameter as an
-    // ArraySlice<DataT>. We also can't convert it to a vector, because
-    // vector<bool> is special so that it cannot be an ArraySlice<bool>, which
+    // way built-in types do, which is why we can't take the parameter as a
+    // Span<DataT>. We also can't convert it to a vector, because
+    // vector<bool> is special so that it cannot be a Span<bool>, which
// is what the code below wants. So instead we do this.
Literal input_values =
std::move(*LiteralUtil::CreateR1(input_values_int)
@@ -385,10 +385,10 @@ class DynamicUpdateSliceTest : public ClientLibraryTestBase {
}
template <typename IndexT, typename DataT>
- void RunR1(tensorflow::gtl::ArraySlice<int> input_values_int,
- tensorflow::gtl::ArraySlice<int> update_values_int,
+ void RunR1(absl::Span<const int> input_values_int,
+ absl::Span<const int> update_values_int,
const std::vector<IndexT> slice_starts,
- tensorflow::gtl::ArraySlice<int> expected_values_int) {
+ absl::Span<const int> expected_values_int) {
Literal input_values =
std::move(*LiteralUtil::CreateR1(input_values_int)
->Convert(primitive_util::NativeToPrimitiveType<DataT>())
diff --git a/tensorflow/compiler/xla/tests/floor_ceil_test.cc b/tensorflow/compiler/xla/tests/floor_ceil_test.cc
index 4a835a8e21..313d10566e 100644
--- a/tensorflow/compiler/xla/tests/floor_ceil_test.cc
+++ b/tensorflow/compiler/xla/tests/floor_ceil_test.cc
@@ -37,8 +37,8 @@ class FloorCeilTest : public ClientLibraryTestBase {
};
// Runs a computation and comparison on expected vs f(input)
- void TestR1F32(tensorflow::gtl::ArraySlice<float> input,
- tensorflow::gtl::ArraySlice<float> expected, Function f) {
+ void TestR1F32(absl::Span<const float> input,
+ absl::Span<const float> expected, Function f) {
    LOG(INFO) << "input: {" << absl::StrJoin(input, ", ") << "}";
XlaBuilder builder(TestName());
auto c = ConstantR1<float>(&builder, input);
diff --git a/tensorflow/compiler/xla/tests/fusion_test.cc b/tensorflow/compiler/xla/tests/fusion_test.cc
index 15a9d55bfe..cbfe147953 100644
--- a/tensorflow/compiler/xla/tests/fusion_test.cc
+++ b/tensorflow/compiler/xla/tests/fusion_test.cc
@@ -48,7 +48,6 @@ limitations under the License.
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/platform/types.h"
-using tensorflow::gtl::ArraySlice;
namespace xla {
namespace {
@@ -113,7 +112,7 @@ class FusionTest : public HloTestBase {
hlos[0] = builder.AddInstruction(std::move(root_hlo));
hlo_module->AddEntryComputation(builder.Build())
->CreateFusionInstruction(
- ArraySlice<HloInstruction*>(hlos).subspan(0, Arity + 1),
+ absl::Span<HloInstruction* const>(hlos).subspan(0, Arity + 1),
HloInstruction::FusionKind::kLoop);
auto expected = LiteralUtil::CreateR2FromArray2D(answer_data);
@@ -127,12 +126,12 @@ class FusionTest : public HloTestBase {
private:
template <typename T>
- T ComputeElementwiseAnswer(HloOpcode opcode, ArraySlice<float> xs);
+ T ComputeElementwiseAnswer(HloOpcode opcode, absl::Span<const float> xs);
};
template <>
float FusionTest::ComputeElementwiseAnswer<float>(HloOpcode opcode,
- ArraySlice<float> xs) {
+ absl::Span<const float> xs) {
switch (opcode) {
case HloOpcode::kAdd:
return xs[0] + xs[1];
@@ -157,7 +156,7 @@ float FusionTest::ComputeElementwiseAnswer<float>(HloOpcode opcode,
template <>
bool FusionTest::ComputeElementwiseAnswer<bool>(HloOpcode opcode,
- ArraySlice<float> xs) {
+ absl::Span<const float> xs) {
switch (opcode) {
case HloOpcode::kEq:
return xs[0] == xs[1];
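
The fusion test above also relies on Span::subspan, the replacement for ArraySlice's slicing. A minimal sketch with plain ints standing in for the HloInstruction pointers:

#include <iostream>
#include <vector>
#include "absl/types/span.h"

int main() {
  std::vector<int> hlos = {10, 11, 12, 13, 14};

  // View of the first three elements; no copy is made.
  absl::Span<const int> prefix = absl::Span<const int>(hlos).subspan(0, 3);

  for (int v : prefix) std::cout << v << " ";  // Prints: 10 11 12
  std::cout << "\n";
}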
diff --git a/tensorflow/compiler/xla/tests/gather_operation_test.cc b/tensorflow/compiler/xla/tests/gather_operation_test.cc
index 205d417f0c..6d63498044 100644
--- a/tensorflow/compiler/xla/tests/gather_operation_test.cc
+++ b/tensorflow/compiler/xla/tests/gather_operation_test.cc
@@ -34,8 +34,7 @@ class GatherOperationTest : public HloTestBase {
RunTest(hlo_text, {operand, start_indices});
}
- void RunTest(const string& hlo_text,
- tensorflow::gtl::ArraySlice<Literal*> args) {
+ void RunTest(const string& hlo_text, absl::Span<Literal* const> args) {
HloModuleConfig config;
config.set_debug_options(GetDebugOptionsForTest());
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
diff --git a/tensorflow/compiler/xla/tests/half_test.cc b/tensorflow/compiler/xla/tests/half_test.cc
index 51450314b6..1115e50fe3 100644
--- a/tensorflow/compiler/xla/tests/half_test.cc
+++ b/tensorflow/compiler/xla/tests/half_test.cc
@@ -126,9 +126,8 @@ INSTANTIATE_TEST_CASE_P(half, UnaryPredTest,
::testing::Values(UnaryPredTestParam{
[](half x) { return isfinite(x); }, &IsFinite}));
-using BinaryBuildFuncTy =
- std::function<void(const xla::XlaOp& x, const xla::XlaOp& y,
- tensorflow::gtl::ArraySlice<int64>)>;
+using BinaryBuildFuncTy = std::function<void(
+ const xla::XlaOp& x, const xla::XlaOp& y, absl::Span<const int64>)>;
struct BinaryOpTestParam {
std::function<half(half, half)> compute_func;
diff --git a/tensorflow/compiler/xla/tests/hlo_test_base.cc b/tensorflow/compiler/xla/tests/hlo_test_base.cc
index 93ea144438..1c42a19414 100644
--- a/tensorflow/compiler/xla/tests/hlo_test_base.cc
+++ b/tensorflow/compiler/xla/tests/hlo_test_base.cc
@@ -44,7 +44,6 @@ namespace {
using absl::optional;
using absl::string_view;
-using tensorflow::gtl::ArraySlice;
constexpr char kInterpreter[] = "interpreter";
@@ -130,14 +129,12 @@ DebugOptions HloTestBase::GetDebugOptionsForTest() {
}
StatusOr<std::unique_ptr<Literal>> HloTestBase::Execute(
- std::unique_ptr<HloModule> module,
- tensorflow::gtl::ArraySlice<Literal*> arguments) {
+ std::unique_ptr<HloModule> module, absl::Span<Literal* const> arguments) {
return test_runner_.Execute(std::move(module), arguments);
}
std::unique_ptr<Literal> HloTestBase::ExecuteNoHloPasses(
- std::unique_ptr<HloModule> module,
- tensorflow::gtl::ArraySlice<Literal*> arguments) {
+ std::unique_ptr<HloModule> module, absl::Span<Literal* const> arguments) {
return test_runner_
.Execute(std::move(module), arguments,
/*run_hlo_passes=*/false)
@@ -145,8 +142,7 @@ std::unique_ptr<Literal> HloTestBase::ExecuteNoHloPasses(
}
std::unique_ptr<Literal> HloTestBase::ExecuteAndTransfer(
- std::unique_ptr<HloModule> module,
- tensorflow::gtl::ArraySlice<Literal*> arguments) {
+ std::unique_ptr<HloModule> module, absl::Span<Literal* const> arguments) {
return test_runner_.Execute(std::move(module), arguments).ValueOrDie();
}
@@ -169,7 +165,8 @@ StatusOr<std::unique_ptr<HloModule>> HloTestBase::MakeReferenceModule(
}
StatusOr<::testing::AssertionResult> HloTestBase::RunAndCompareInternal(
- std::unique_ptr<HloModule> module, const ArraySlice<Literal*> arguments,
+ std::unique_ptr<HloModule> module,
+ const absl::Span<Literal* const> arguments,
const optional<ErrorSpec>& error, bool run_hlo_passes,
const std::function<void(HloModule*)>& reference_preprocessor) {
TF_RETURN_IF_ERROR(hlo_verifier_->Run(module.get()).status());
@@ -188,7 +185,8 @@ StatusOr<::testing::AssertionResult> HloTestBase::RunAndCompareInternal(
}
::testing::AssertionResult HloTestBase::RunAndCompare(
- std::unique_ptr<HloModule> module, const ArraySlice<Literal*> arguments,
+ std::unique_ptr<HloModule> module,
+ const absl::Span<Literal* const> arguments,
const optional<ErrorSpec>& error,
const std::function<void(HloModule*)>& reference_preprocessor) {
auto result =
@@ -201,7 +199,8 @@ StatusOr<::testing::AssertionResult> HloTestBase::RunAndCompareInternal(
}
::testing::AssertionResult HloTestBase::RunAndCompareNoHloPasses(
- std::unique_ptr<HloModule> module, const ArraySlice<Literal*> arguments,
+ std::unique_ptr<HloModule> module,
+ const absl::Span<Literal* const> arguments,
const optional<ErrorSpec>& error,
const std::function<void(HloModule*)>& reference_preprocessor) {
auto result =
diff --git a/tensorflow/compiler/xla/tests/hlo_test_base.h b/tensorflow/compiler/xla/tests/hlo_test_base.h
index 06bcc39741..1468930e9c 100644
--- a/tensorflow/compiler/xla/tests/hlo_test_base.h
+++ b/tensorflow/compiler/xla/tests/hlo_test_base.h
@@ -114,18 +114,15 @@ class HloTestBase : public ::testing::Test {
// Executes the given module and return the result as a Literal.
StatusOr<std::unique_ptr<Literal>> Execute(
- std::unique_ptr<HloModule> module,
- tensorflow::gtl::ArraySlice<Literal*> arguments);
+ std::unique_ptr<HloModule> module, absl::Span<Literal* const> arguments);
// Same as above, except the module will be executed without running any HLO
// passes on it.
std::unique_ptr<Literal> ExecuteNoHloPasses(
- std::unique_ptr<HloModule> module,
- tensorflow::gtl::ArraySlice<Literal*> arguments);
+ std::unique_ptr<HloModule> module, absl::Span<Literal* const> arguments);
std::unique_ptr<Literal> ExecuteAndTransfer(
- std::unique_ptr<HloModule> module,
- tensorflow::gtl::ArraySlice<Literal*> arguments);
+ std::unique_ptr<HloModule> module, absl::Span<Literal* const> arguments);
// Executes the given hlo module on two backends and compares results.
//
@@ -140,7 +137,7 @@ class HloTestBase : public ::testing::Test {
// modified.
::testing::AssertionResult RunAndCompare(
std::unique_ptr<HloModule> module,
- const tensorflow::gtl::ArraySlice<Literal*> arguments,
+ const absl::Span<Literal* const> arguments,
const absl::optional<ErrorSpec>& error,
const std::function<void(HloModule*)>& reference_preprocessor = nullptr)
TF_MUST_USE_RESULT;
@@ -149,7 +146,7 @@ class HloTestBase : public ::testing::Test {
// optimization.
::testing::AssertionResult RunAndCompareNoHloPasses(
std::unique_ptr<HloModule> module,
- const tensorflow::gtl::ArraySlice<Literal*> arguments,
+ const absl::Span<Literal* const> arguments,
const absl::optional<ErrorSpec>& error,
const std::function<void(HloModule*)>& reference_preprocessor = nullptr)
TF_MUST_USE_RESULT;
@@ -261,7 +258,7 @@ class HloTestBase : public ::testing::Test {
// error happens before the results are computed, returns the error status.
StatusOr<::testing::AssertionResult> RunAndCompareInternal(
std::unique_ptr<HloModule> module,
- const tensorflow::gtl::ArraySlice<Literal*> arguments,
+ const absl::Span<Literal* const> arguments,
const absl::optional<ErrorSpec>& error, bool run_hlo_passes,
const std::function<void(HloModule*)>& reference_preprocessor);
};
diff --git a/tensorflow/compiler/xla/tests/literal_test_util.h b/tensorflow/compiler/xla/tests/literal_test_util.h
index 3dad91951e..342a0aa735 100644
--- a/tensorflow/compiler/xla/tests/literal_test_util.h
+++ b/tensorflow/compiler/xla/tests/literal_test_util.h
@@ -62,7 +62,7 @@ class LiteralTestUtil {
static void ExpectR0Equal(NativeT expected, const LiteralSlice& actual);
template <typename NativeT>
- static void ExpectR1Equal(tensorflow::gtl::ArraySlice<NativeT> expected,
+ static void ExpectR1Equal(absl::Span<const NativeT> expected,
const LiteralSlice& actual);
template <typename NativeT>
static void ExpectR2Equal(
@@ -102,7 +102,7 @@ class LiteralTestUtil {
const ErrorSpec& error);
template <typename NativeT>
- static void ExpectR1Near(tensorflow::gtl::ArraySlice<NativeT> expected,
+ static void ExpectR1Near(absl::Span<const NativeT> expected,
const LiteralSlice& actual, const ErrorSpec& error);
template <typename NativeT>
@@ -160,7 +160,7 @@ template <typename NativeT>
template <typename NativeT>
/* static */ void LiteralTestUtil::ExpectR1Equal(
- tensorflow::gtl::ArraySlice<NativeT> expected, const LiteralSlice& actual) {
+ absl::Span<const NativeT> expected, const LiteralSlice& actual) {
EXPECT_TRUE(Equal(*LiteralUtil::CreateR1<NativeT>(expected), actual));
}
@@ -206,7 +206,7 @@ template <typename NativeT>
template <typename NativeT>
/* static */ void LiteralTestUtil::ExpectR1Near(
- tensorflow::gtl::ArraySlice<NativeT> expected, const LiteralSlice& actual,
+ absl::Span<const NativeT> expected, const LiteralSlice& actual,
const ErrorSpec& error) {
EXPECT_TRUE(Near(*LiteralUtil::CreateR1<NativeT>(expected), actual, error));
}
diff --git a/tensorflow/compiler/xla/tests/local_client_test_base.cc b/tensorflow/compiler/xla/tests/local_client_test_base.cc
index 948b60061e..a8c68fc7fd 100644
--- a/tensorflow/compiler/xla/tests/local_client_test_base.cc
+++ b/tensorflow/compiler/xla/tests/local_client_test_base.cc
@@ -156,7 +156,7 @@ ExecutableRunOptions LocalClientTestBase::DefaultExecutableRunOptions() const {
ScopedShapedBuffer LocalClientTestBase::ExecuteLocallyOrDie(
const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments) {
+ absl::Span<const ShapedBuffer* const> arguments) {
return ExecuteLocally(computation, arguments, DefaultExecutableBuildOptions(),
DefaultExecutableRunOptions())
.ConsumeValueOrDie();
@@ -164,7 +164,7 @@ ScopedShapedBuffer LocalClientTestBase::ExecuteLocallyOrDie(
ScopedShapedBuffer LocalClientTestBase::ExecuteLocallyOrDie(
const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments,
+ absl::Span<const ShapedBuffer* const> arguments,
const ExecutableBuildOptions& build_options,
const ExecutableRunOptions& run_options) {
return ExecuteLocally(computation, arguments, build_options, run_options)
@@ -173,14 +173,14 @@ ScopedShapedBuffer LocalClientTestBase::ExecuteLocallyOrDie(
StatusOr<ScopedShapedBuffer> LocalClientTestBase::ExecuteLocally(
const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments) {
+ absl::Span<const ShapedBuffer* const> arguments) {
return ExecuteLocally(computation, arguments, DefaultExecutableBuildOptions(),
DefaultExecutableRunOptions());
}
StatusOr<ScopedShapedBuffer> LocalClientTestBase::ExecuteLocally(
const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments,
+ absl::Span<const ShapedBuffer* const> arguments,
const ExecutableBuildOptions& build_options,
const ExecutableRunOptions& run_options) {
std::vector<const Shape*> argument_layouts(arguments.size());
diff --git a/tensorflow/compiler/xla/tests/local_client_test_base.h b/tensorflow/compiler/xla/tests/local_client_test_base.h
index b4477e9a6b..cb74ff71d4 100644
--- a/tensorflow/compiler/xla/tests/local_client_test_base.h
+++ b/tensorflow/compiler/xla/tests/local_client_test_base.h
@@ -93,19 +93,19 @@ class LocalClientTestBase : public ::testing::Test {
// options.
StatusOr<ScopedShapedBuffer> ExecuteLocally(
const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments);
+ absl::Span<const ShapedBuffer* const> arguments);
StatusOr<ScopedShapedBuffer> ExecuteLocally(
const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments,
+ absl::Span<const ShapedBuffer* const> arguments,
const ExecutableBuildOptions& build_options,
const ExecutableRunOptions& run_options);
ScopedShapedBuffer ExecuteLocallyOrDie(
const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments);
+ absl::Span<const ShapedBuffer* const> arguments);
ScopedShapedBuffer ExecuteLocallyOrDie(
const XlaComputation& computation,
- tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments,
+ absl::Span<const ShapedBuffer* const> arguments,
const ExecutableBuildOptions& build_options,
const ExecutableRunOptions& run_options);
diff --git a/tensorflow/compiler/xla/tests/multioutput_fusion_test.cc b/tensorflow/compiler/xla/tests/multioutput_fusion_test.cc
index b0a324f6fc..fc0ba28b12 100644
--- a/tensorflow/compiler/xla/tests/multioutput_fusion_test.cc
+++ b/tensorflow/compiler/xla/tests/multioutput_fusion_test.cc
@@ -47,7 +47,6 @@ limitations under the License.
namespace xla {
namespace {
-using ::tensorflow::gtl::ArraySlice;
class MultiOutputFusionTest : public HloTestBase {
protected:
diff --git a/tensorflow/compiler/xla/tests/pred_test.cc b/tensorflow/compiler/xla/tests/pred_test.cc
index 2fc7f816b5..58539e6b06 100644
--- a/tensorflow/compiler/xla/tests/pred_test.cc
+++ b/tensorflow/compiler/xla/tests/pred_test.cc
@@ -31,7 +31,7 @@ class PredTest : public ClientLibraryTestBase {
protected:
void TestCompare(bool lhs, bool rhs, bool expected,
std::function<XlaOp(const xla::XlaOp&, const xla::XlaOp&,
- tensorflow::gtl::ArraySlice<int64>)>
+ absl::Span<const int64>)>
op) {
XlaBuilder builder(TestName());
XlaOp lhs_op = ConstantR0<bool>(&builder, lhs);
diff --git a/tensorflow/compiler/xla/tests/prng_test.cc b/tensorflow/compiler/xla/tests/prng_test.cc
index 326e13b386..ee6bdfd006 100644
--- a/tensorflow/compiler/xla/tests/prng_test.cc
+++ b/tensorflow/compiler/xla/tests/prng_test.cc
@@ -37,8 +37,7 @@ namespace {
class PrngTest : public ClientLibraryTestBase {
protected:
template <typename T>
- std::unique_ptr<Literal> UniformTest(T a, T b,
- tensorflow::gtl::ArraySlice<int64> dims,
+ std::unique_ptr<Literal> UniformTest(T a, T b, absl::Span<const int64> dims,
int64 seed = 42);
// Computes the χ² statistic of a sample of the discrete uniform distribution
@@ -50,8 +49,9 @@ class PrngTest : public ClientLibraryTestBase {
};
template <typename T>
-std::unique_ptr<Literal> PrngTest::UniformTest(
- T a, T b, tensorflow::gtl::ArraySlice<int64> dims, int64 seed) {
+std::unique_ptr<Literal> PrngTest::UniformTest(T a, T b,
+ absl::Span<const int64> dims,
+ int64 seed) {
XlaBuilder builder(TestName());
RngUniform(
ConstantR0<T>(&builder, a), ConstantR0<T>(&builder, b),
@@ -61,7 +61,7 @@ std::unique_ptr<Literal> PrngTest::UniformTest(
auto actual =
ExecuteAndTransfer(&builder, /*arguments=*/{}).ConsumeValueOrDie();
EXPECT_THAT(dims, ::testing::ElementsAreArray(actual->shape().dimensions()));
- actual->EachCell<T>([=](tensorflow::gtl::ArraySlice<int64>, T value) {
+ actual->EachCell<T>([=](absl::Span<const int64>, T value) {
EXPECT_LE(a, value);
EXPECT_LT(value, b);
});
@@ -117,7 +117,7 @@ XLA_TEST_F(PrngTest, DISABLED_ON_GPU(DISABLED_ON_CPU(ScalarBF16CountTests))) {
for (int64 seed = 0; seed < count; ++seed) {
auto result = UniformTest<bfloat16>(low, high, {}, /*seed=*/seed);
result->Literal::EachCell<bfloat16>(
- [&](tensorflow::gtl::ArraySlice<int64>, bfloat16 value) {
+ [&](absl::Span<const int64>, bfloat16 value) {
int64 index = static_cast<int64>((value - low) / interval);
counts[index]++;
});
@@ -149,8 +149,8 @@ double PrngTest::UniformChiSquared(int32 range_size, int32 expected_count,
auto actual =
ExecuteAndTransfer(&builder, /*arguments=*/{}).ConsumeValueOrDie();
std::vector<int32> counts(range_size, 0);
- actual->EachCell<int32>([&counts](tensorflow::gtl::ArraySlice<int64>,
- int32 value) { ++counts[value]; });
+ actual->EachCell<int32>(
+ [&counts](absl::Span<const int64>, int32 value) { ++counts[value]; });
int64 sum = 0;
for (int32 i = 0; i < range_size; ++i) {
sum += Square(static_cast<int64>(counts[i] - expected_count));
diff --git a/tensorflow/compiler/xla/tests/reduce_test.cc b/tensorflow/compiler/xla/tests/reduce_test.cc
index 346f702488..51d429ddea 100644
--- a/tensorflow/compiler/xla/tests/reduce_test.cc
+++ b/tensorflow/compiler/xla/tests/reduce_test.cc
@@ -115,8 +115,7 @@ class ReduceTest : public ClientLibraryTestBase {
ErrorSpec(0.001));
}
- void RunR1ToR0PredTest(bool and_reduce,
- tensorflow::gtl::ArraySlice<int> input_data) {
+ void RunR1ToR0PredTest(bool and_reduce, absl::Span<const int> input_data) {
const int element_count = input_data.size();
XlaBuilder builder(TestName());
const Shape input_shape = ShapeUtil::MakeShape(S32, {element_count});
@@ -261,8 +260,8 @@ class ReduceTest : public ClientLibraryTestBase {
void ComputeAndCompareGeneric(
typename std::enable_if<std::is_floating_point<NativeT>::value,
XlaBuilder>::type* builder,
- tensorflow::gtl::ArraySlice<NativeT> expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments) {
+ absl::Span<const NativeT> expected,
+ absl::Span<GlobalData* const> arguments) {
ComputeAndCompareR1<NativeT>(builder, expected, arguments,
ErrorSpec(0.01, 1e-4));
}
@@ -271,8 +270,8 @@ class ReduceTest : public ClientLibraryTestBase {
void ComputeAndCompareGeneric(
typename std::enable_if<std::is_integral<NativeT>::value,
XlaBuilder>::type* builder,
- tensorflow::gtl::ArraySlice<NativeT> expected,
- tensorflow::gtl::ArraySlice<GlobalData*> arguments) {
+ absl::Span<const NativeT> expected,
+ absl::Span<GlobalData* const> arguments) {
ComputeAndCompareR1<NativeT>(builder, expected, arguments);
}
@@ -304,7 +303,7 @@ class ReduceTest : public ClientLibraryTestBase {
client_->TransferToServer(*input_literal).ConsumeValueOrDie();
// NativeT can be bool, and std::vector<bool> does not convert to
- // ArraySlice.
+ // Span.
std::unique_ptr<NativeT[]> expected(new NativeT[cols]);
for (int64 colno = 0; colno < cols; ++colno) {
NativeT column_result = initial_value;
@@ -316,7 +315,7 @@ class ReduceTest : public ClientLibraryTestBase {
}
ComputeAndCompareGeneric<NativeT>(
- &builder, tensorflow::gtl::ArraySlice<NativeT>(expected.get(), cols),
+ &builder, absl::Span<const NativeT>(expected.get(), cols),
{input_global_data.get()});
}
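
Because the expected results here live in a unique_ptr<NativeT[]> rather than a container, the test uses the (pointer, length) span constructor. A small standalone sketch of that constructor (kCols and the float data are illustrative):

#include <cstdint>
#include <iostream>
#include <memory>
#include <numeric>
#include "absl/types/span.h"

int main() {
  constexpr int64_t kCols = 4;
  std::unique_ptr<float[]> expected(new float[kCols]{1.f, 2.f, 3.f, 4.f});

  // unique_ptr<T[]> is not a container, so there is no implicit conversion;
  // the (pointer, length) constructor is the right tool here.
  absl::Span<const float> view(expected.get(), kCols);

  float sum = std::accumulate(view.begin(), view.end(), 0.f);
  std::cout << sum << "\n";  // Prints 10.
}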
diff --git a/tensorflow/compiler/xla/tests/reduce_window_test.cc b/tensorflow/compiler/xla/tests/reduce_window_test.cc
index 60167619a4..679ee4d482 100644
--- a/tensorflow/compiler/xla/tests/reduce_window_test.cc
+++ b/tensorflow/compiler/xla/tests/reduce_window_test.cc
@@ -70,8 +70,8 @@ class ReduceWindowTest : public ::testing::WithParamInterface<bool>,
ReduceWindowTest() : builder_(TestName()) { set_use_bfloat16(GetParam()); }
void ReduceWindowAdd(const XlaOp& input,
- tensorflow::gtl::ArraySlice<int64> window_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_strides,
+ absl::Span<const int64> window_dimensions,
+ absl::Span<const int64> window_strides,
Padding padding) {
auto init = CreateConstantFromLiteral(*LiteralUtil::CreateR0<float>(0.0f),
&builder_);
@@ -81,8 +81,8 @@ class ReduceWindowTest : public ::testing::WithParamInterface<bool>,
}
void ReduceWindowMax(const XlaOp& input,
- tensorflow::gtl::ArraySlice<int64> window_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_strides,
+ absl::Span<const int64> window_dimensions,
+ absl::Span<const int64> window_strides,
Padding padding) {
auto init =
CreateConstantFromLiteral(LiteralUtil::MinValue(F32), &builder_);
@@ -92,8 +92,8 @@ class ReduceWindowTest : public ::testing::WithParamInterface<bool>,
}
void ReduceWindowMin(const XlaOp& input,
- tensorflow::gtl::ArraySlice<int64> window_dimensions,
- tensorflow::gtl::ArraySlice<int64> window_strides,
+ absl::Span<const int64> window_dimensions,
+ absl::Span<const int64> window_strides,
Padding padding) {
auto init =
CreateConstantFromLiteral(LiteralUtil::MaxValue(F32), &builder_);
@@ -1303,7 +1303,7 @@ TEST_P(R1ReduceWindowTest, DoIt) {
std::vector<float> input_vector(param.base_bounds[0]);
std::iota(std::begin(input_vector), std::end(input_vector), 0);
std::unique_ptr<Literal> input_literal =
- LiteralUtil::CreateR1(tensorflow::gtl::ArraySlice<float>(input_vector));
+ LiteralUtil::CreateR1(absl::Span<const float>(input_vector));
XlaOp parameter;
auto input_arg = CreateParameterAndTransferLiteral(0, *input_literal, "p0",
&b, &parameter);
@@ -1327,7 +1327,7 @@ TEST_P(R1ReduceWindowTest, DoIt) {
? +[](float a, float b) { return a + b; }
: +[](float a, float b) { return std::max(a, b); };
auto expected = ReferenceUtil::ReduceWindow1DGeneric(
- /*operand=*/tensorflow::gtl::ArraySlice<float>(input_vector),
+ /*operand=*/absl::Span<const float>(input_vector),
/*init=*/kInitValue,
/*reduce_func=*/reduce_func,
/*window=*/param.window_bounds,
diff --git a/tensorflow/compiler/xla/tests/reshape_test.cc b/tensorflow/compiler/xla/tests/reshape_test.cc
index 382d1b1ae7..ec4a2bcd3d 100644
--- a/tensorflow/compiler/xla/tests/reshape_test.cc
+++ b/tensorflow/compiler/xla/tests/reshape_test.cc
@@ -689,9 +689,8 @@ XLA_TEST_P(ReshapeTest, R4ToR2_2x1x1x1_To_2x1) {
std::mt19937 rng;
std::uniform_real_distribution<float> distribution;
Array4D<float> input(2, 1, 1, 1);
- input.Each(
- [&rng, &distribution](tensorflow::gtl::ArraySlice<int64> /* indices */,
- float* cell) { *cell = distribution(rng); });
+ input.Each([&rng, &distribution](absl::Span<const int64> /* indices */,
+ float* cell) { *cell = distribution(rng); });
std::unique_ptr<Literal> input_literal =
LiteralUtil::CreateR4FromArray4DWithLayout(
input, LayoutUtil::MakeLayout({3, 2, 1, 0}));
@@ -711,9 +710,8 @@ XLA_TEST_P(ReshapeTest, R4ToR2_2x1x4x1_To_4x2) {
std::mt19937 rng;
std::uniform_real_distribution<float> distribution;
Array4D<float> input(2, 1, 4, 1);
- input.Each(
- [&rng, &distribution](tensorflow::gtl::ArraySlice<int64> /* indices */,
- float* cell) { *cell = distribution(rng); });
+ input.Each([&rng, &distribution](absl::Span<const int64> /* indices */,
+ float* cell) { *cell = distribution(rng); });
std::unique_ptr<Literal> input_literal =
LiteralUtil::CreateR4FromArray4DWithLayout(
input, LayoutUtil::MakeLayout({3, 2, 1, 0}));
@@ -734,9 +732,8 @@ XLA_TEST_P(ReshapeTest, R4ToR2_5x10x2x3_To_5x60_Dimensions_0213) {
std::mt19937 rng;
std::uniform_real_distribution<float> distribution;
Array4D<float> input(5, 10, 2, 3);
- input.Each(
- [&rng, &distribution](tensorflow::gtl::ArraySlice<int64> /* indices */,
- float* cell) { *cell = distribution(rng); });
+ input.Each([&rng, &distribution](absl::Span<const int64> /* indices */,
+ float* cell) { *cell = distribution(rng); });
std::unique_ptr<Literal> input_literal =
LiteralUtil::CreateR4FromArray4DWithLayout(
input, LayoutUtil::MakeLayout({3, 2, 1, 0}));
@@ -747,7 +744,7 @@ XLA_TEST_P(ReshapeTest, R4ToR2_5x10x2x3_To_5x60_Dimensions_0213) {
/*new_sizes=*/{5, 60});
Array2D<float> expected_array(5, 60);
- input.Each([&](tensorflow::gtl::ArraySlice<int64> indices, float* cell) {
+ input.Each([&](absl::Span<const int64> indices, float* cell) {
expected_array(indices[0], indices[2] * 30 + indices[1] * 3 + indices[3]) =
*cell;
});
@@ -762,7 +759,7 @@ XLA_TEST_P(ReshapeTest, NoopReshape) {
std::uniform_real_distribution<float> distribution;
Array4D<float> input_array(2, 3, 5, 7);
input_array.Each(
- [&rng, &distribution](tensorflow::gtl::ArraySlice<int64> /* indices */,
+ [&rng, &distribution](absl::Span<const int64> /* indices */,
float* cell) { *cell = distribution(rng); });
std::unique_ptr<Literal> input_literal =
LiteralUtil::CreateR4FromArray4DWithLayout(
@@ -842,9 +839,8 @@ XLA_TEST_P(ReshapeTest, R4TwoMinorTransposeSimple) {
std::vector<int64> bounds = {2, 2, 2, 2};
std::vector<int64> new_bounds = {bounds[0], bounds[1], bounds[3], bounds[2]};
Array4D<float> input(bounds[0], bounds[1], bounds[2], bounds[3]);
- input.Each(
- [&rng, &distribution](tensorflow::gtl::ArraySlice<int64> /* indices */,
- float* cell) { *cell = distribution(rng); });
+ input.Each([&rng, &distribution](absl::Span<const int64> /* indices */,
+ float* cell) { *cell = distribution(rng); });
std::unique_ptr<Literal> input_literal =
LiteralUtil::CreateR4FromArray4DWithLayout(
input, LayoutUtil::MakeLayout({3, 2, 1, 0}));
@@ -871,9 +867,8 @@ XLA_TEST_P(ReshapeTest, R4TwoMinorTransposeMajorFirstEffectiveR2) {
std::vector<int64> bounds = {1, 1, 250, 300};
std::vector<int64> new_bounds = {bounds[0], bounds[1], bounds[3], bounds[2]};
Array4D<float> input(bounds[0], bounds[1], bounds[2], bounds[3]);
- input.Each(
- [&rng, &distribution](tensorflow::gtl::ArraySlice<int64> /* indices */,
- float* cell) { *cell = distribution(rng); });
+ input.Each([&rng, &distribution](absl::Span<const int64> /* indices */,
+ float* cell) { *cell = distribution(rng); });
std::unique_ptr<Literal> input_literal =
LiteralUtil::CreateR4FromArray4DWithLayout(
input, LayoutUtil::MakeLayout({3, 2, 1, 0}));
@@ -900,9 +895,8 @@ XLA_TEST_P(ReshapeTest, R4TwoMinorTransposeMajorFirstMinorEffectiveR1) {
std::vector<int64> bounds = {5, 5, 1, 10};
std::vector<int64> new_bounds = {bounds[0], bounds[1], bounds[3], bounds[2]};
Array4D<float> input(bounds[0], bounds[1], bounds[2], bounds[3]);
- input.Each(
- [&rng, &distribution](tensorflow::gtl::ArraySlice<int64> /* indices */,
- float* cell) { *cell = distribution(rng); });
+ input.Each([&rng, &distribution](absl::Span<const int64> /* indices */,
+ float* cell) { *cell = distribution(rng); });
std::unique_ptr<Literal> input_literal =
LiteralUtil::CreateR4FromArray4DWithLayout(
input, LayoutUtil::MakeLayout({3, 2, 1, 0}));
@@ -930,9 +924,8 @@ XLA_TEST_P(ReshapeTest, R4TwoMinorTransposeMajorFirstMinorEffectiveR1InR2) {
std::vector<int64> bounds = {5, 5, 10, 1};
std::vector<int64> new_bounds = {bounds[0], bounds[1], bounds[3], bounds[2]};
Array4D<float> input(bounds[0], bounds[1], bounds[2], bounds[3]);
- input.Each(
- [&rng, &distribution](tensorflow::gtl::ArraySlice<int64> /* indices */,
- float* cell) { *cell = distribution(rng); });
+ input.Each([&rng, &distribution](absl::Span<const int64> /* indices */,
+ float* cell) { *cell = distribution(rng); });
std::unique_ptr<Literal> input_literal =
LiteralUtil::CreateR4FromArray4DWithLayout(
input, LayoutUtil::MakeLayout({3, 2, 1, 0}));
@@ -959,9 +952,8 @@ XLA_TEST_P(ReshapeTest, R4TwoMinorTransposeTrivialR2) {
std::vector<int64> bounds = {3, 3, 1, 3};
std::vector<int64> new_bounds = {bounds[1], bounds[0], bounds[2], bounds[3]};
Array4D<float> input(bounds[0], bounds[1], bounds[2], bounds[3]);
- input.Each(
- [&rng, &distribution](tensorflow::gtl::ArraySlice<int64> /* indices */,
- float* cell) { *cell = distribution(rng); });
+ input.Each([&rng, &distribution](absl::Span<const int64> /* indices */,
+ float* cell) { *cell = distribution(rng); });
std::unique_ptr<Literal> input_literal =
LiteralUtil::CreateR4FromArray4DWithLayout(
input, LayoutUtil::MakeLayout({0, 1, 2, 3}));
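The reshape hunks above are all the same mechanical rewrite of Each callbacks from ArraySlice parameters to absl::Span parameters. A standalone sketch of that callback shape follows; EachIndex here is a miniature stand-in for Array4D::Each, not the XLA class:

  #include <cstdint>
  #include <functional>
  #include <vector>
  #include "absl/types/span.h"

  // Walk a rows x cols index space and hand each index tuple to the callback
  // as a read-only span, mirroring how Array4D::Each presents indices.
  void EachIndex(int64_t rows, int64_t cols,
                 const std::function<void(absl::Span<const int64_t>)>& fn) {
    std::vector<int64_t> indices(2);
    for (indices[0] = 0; indices[0] < rows; ++indices[0]) {
      for (indices[1] = 0; indices[1] < cols; ++indices[1]) {
        fn(indices);
      }
    }
  }

  int main() {
    int64_t visited = 0;
    EachIndex(2, 3,
              [&visited](absl::Span<const int64_t> /*indices*/) { ++visited; });
    return visited == 6 ? 0 : 1;
  }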
diff --git a/tensorflow/compiler/xla/tests/reverse_test.cc b/tensorflow/compiler/xla/tests/reverse_test.cc
index c755ff63c9..74ded82ddf 100644
--- a/tensorflow/compiler/xla/tests/reverse_test.cc
+++ b/tensorflow/compiler/xla/tests/reverse_test.cc
@@ -39,8 +39,8 @@ static std::array<bool, 1> use_bfloat16_params{false};
#endif
struct ReverseSpec {
- tensorflow::gtl::ArraySlice<int64> input_dims;
- tensorflow::gtl::ArraySlice<int64> reversal;
+ absl::Span<const int64> input_dims;
+ absl::Span<const int64> reversal;
bool use_bfloat16;
string ToTestCaseName() const {
@@ -91,17 +91,16 @@ TEST_P(FloatReverseTest, Reverses) {
std::unique_ptr<Literal> expected = input_literal->CloneToUnique();
std::vector<int64> output_indices(spec.input_dims.size());
- expected->EachCell<float>(
- [&](tensorflow::gtl::ArraySlice<int64> indices, float) {
- for (int64 i = 0; i < indices.size(); ++i) {
- output_indices[i] = indices[i];
- }
- float value = input_literal->Get<float>(indices);
- for (int64 dim : spec.reversal) {
- output_indices[dim] = (spec.input_dims[dim] - 1) - indices[dim];
- }
- expected->Set<float>(output_indices, value);
- });
+ expected->EachCell<float>([&](absl::Span<const int64> indices, float) {
+ for (int64 i = 0; i < indices.size(); ++i) {
+ output_indices[i] = indices[i];
+ }
+ float value = input_literal->Get<float>(indices);
+ for (int64 dim : spec.reversal) {
+ output_indices[dim] = (spec.input_dims[dim] - 1) - indices[dim];
+ }
+ expected->Set<float>(output_indices, value);
+ });
ComputeAndCompareLiteral(&builder, *expected, {});
}
diff --git a/tensorflow/compiler/xla/tests/round_trip_packed_literal_test.cc b/tensorflow/compiler/xla/tests/round_trip_packed_literal_test.cc
index a620fe1908..d5a579cfaf 100644
--- a/tensorflow/compiler/xla/tests/round_trip_packed_literal_test.cc
+++ b/tensorflow/compiler/xla/tests/round_trip_packed_literal_test.cc
@@ -47,8 +47,7 @@ class RoundTripPackedLiteralTest : public ClientLibraryTestBase {
TEST_F(RoundTripPackedLiteralTest, RoundTripsR1F32Length2) {
string data(sizeof(float) * 2, 0);
- tensorflow::gtl::MutableArraySlice<float> floats(
- tensorflow::bit_cast<float*>(data.data()), 2);
+ absl::Span<float> floats(tensorflow::bit_cast<float*>(data.data()), 2);
floats[0] = 42.0;
floats[1] = 24.0;
@@ -70,8 +69,7 @@ TEST_F(RoundTripPackedLiteralTest, RoundTripsR1F32Length2) {
TEST_F(RoundTripPackedLiteralTest, RoundTripsR2F32Size2x2Dim0Minor) {
string data(sizeof(float) * 4, 0);
- tensorflow::gtl::MutableArraySlice<float> floats(
- tensorflow::bit_cast<float*>(data.data()), 4);
+ absl::Span<float> floats(tensorflow::bit_cast<float*>(data.data()), 4);
// With x as the minor dimension, these will become:
floats[0] = 42.0; // y=0,x=0
floats[1] = 24.0; // y=0,x=1
@@ -105,8 +103,7 @@ TEST_F(RoundTripPackedLiteralTest, RoundTripsR2F32Size2x2Dim0Minor) {
TEST_F(RoundTripPackedLiteralTest, RoundTripsR2F32Size2x2Dim1Minor) {
string data(sizeof(float) * 4, 0);
- tensorflow::gtl::MutableArraySlice<float> floats(
- tensorflow::bit_cast<float*>(data.data()), 4);
+ absl::Span<float> floats(tensorflow::bit_cast<float*>(data.data()), 4);
// With y as the minor dimension, these will become:
floats[0] = 42.0; // y=0,x=0
floats[1] = 24.0; // y=1,x=0
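As context for the MutableArraySlice<float> to absl::Span<float> rewrites above, a self-contained sketch of a writable span over reinterpreted byte storage. reinterpret_cast stands in for tensorflow::bit_cast and carries the same aliasing caveats as the tests themselves; the driver code is illustrative only:

  #include <cstring>
  #include <string>
  #include "absl/types/span.h"

  int main() {
    // Backing storage sized for two floats, as in the round-trip tests above.
    std::string data(sizeof(float) * 2, 0);

    // A non-const Span allows element assignment; the span still does not
    // own the bytes it views.
    absl::Span<float> floats(reinterpret_cast<float*>(&data[0]), 2);
    floats[0] = 42.0f;
    floats[1] = 24.0f;

    // The writes are visible through the original buffer.
    float readback = 0.0f;
    std::memcpy(&readback, data.data(), sizeof(float));
    return readback == 42.0f ? 0 : 1;
  }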
diff --git a/tensorflow/compiler/xla/tests/scalar_computations_test.cc b/tensorflow/compiler/xla/tests/scalar_computations_test.cc
index cf2d453f43..d60b8e969d 100644
--- a/tensorflow/compiler/xla/tests/scalar_computations_test.cc
+++ b/tensorflow/compiler/xla/tests/scalar_computations_test.cc
@@ -46,9 +46,8 @@ class ScalarComputationsTest : public ClientLibraryTestBase {
// A template for building and running a binary comparison test.
template <typename NativeT>
void TestCompare(NativeT lhs, NativeT rhs, bool expected,
- std::function<XlaOp(const XlaOp&, const XlaOp&,
- tensorflow::gtl::ArraySlice<int64>)>
- op) {
+ const std::function<XlaOp(const XlaOp&, const XlaOp&,
+ absl::Span<const int64>)>& op) {
XlaBuilder builder(TestName());
XlaOp lhs_op = ConstantR0<NativeT>(&builder, lhs);
XlaOp rhs_op = ConstantR0<NativeT>(&builder, rhs);
@@ -58,9 +57,8 @@ class ScalarComputationsTest : public ClientLibraryTestBase {
template <typename NativeT>
void TestMinMax(NativeT lhs, NativeT rhs, NativeT expected,
- std::function<XlaOp(const XlaOp&, const XlaOp&,
- tensorflow::gtl::ArraySlice<int64>)>
- op) {
+ const std::function<XlaOp(const XlaOp&, const XlaOp&,
+ absl::Span<const int64>)>& op) {
XlaBuilder builder(TestName());
XlaOp lhs_op = ConstantR0<NativeT>(&builder, lhs);
XlaOp rhs_op = ConstantR0<NativeT>(&builder, rhs);
diff --git a/tensorflow/compiler/xla/tests/scatter_test.cc b/tensorflow/compiler/xla/tests/scatter_test.cc
index 99eeb12e2b..1858dcea61 100644
--- a/tensorflow/compiler/xla/tests/scatter_test.cc
+++ b/tensorflow/compiler/xla/tests/scatter_test.cc
@@ -32,8 +32,7 @@ class ScatterTest : public HloTestBase {
RunTest(hlo_text, {operand, scatter_indices, updates});
}
- void RunTest(const string& hlo_text,
- tensorflow::gtl::ArraySlice<Literal*> args) {
+ void RunTest(const string& hlo_text, absl::Span<Literal* const> args) {
HloModuleConfig config;
config.set_debug_options(GetDebugOptionsForTest());
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
diff --git a/tensorflow/compiler/xla/tests/select_and_scatter_test.cc b/tensorflow/compiler/xla/tests/select_and_scatter_test.cc
index e3d4f98dd7..f737b5158b 100644
--- a/tensorflow/compiler/xla/tests/select_and_scatter_test.cc
+++ b/tensorflow/compiler/xla/tests/select_and_scatter_test.cc
@@ -42,8 +42,8 @@ struct SelectAndScatterTestParam {
std::vector<int64> operand_shape;
std::vector<int64> source_shape;
Padding padding_type;
- tensorflow::gtl::ArraySlice<int64> window_dimensions;
- tensorflow::gtl::ArraySlice<int64> window_strides;
+ absl::Span<const int64> window_dimensions;
+ absl::Span<const int64> window_strides;
};
class SelectAndScatterTest
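ReverseSpec and SelectAndScatterTestParam above keep spans as struct members. Like the ArraySlice members they replace, these are non-owning views, so the backing storage must outlive the struct. A hedged sketch of the safe and unsafe initializations; WindowSpec is a hypothetical stand-in for the test param structs:

  #include <cstdint>
  #include <vector>
  #include "absl/types/span.h"

  struct WindowSpec {
    // Non-owning views, as in the test param structs above.
    absl::Span<const int64_t> window_dimensions;
    absl::Span<const int64_t> window_strides;
  };

  int main() {
    // Safe: the vectors outlive the spec that views them.
    std::vector<int64_t> dims = {2, 2};
    std::vector<int64_t> strides = {1, 1};
    WindowSpec spec{dims, strides};

    // Unsafe (left commented out): a braced list creates a temporary array
    // that dies at the end of the statement, leaving the spans dangling.
    // WindowSpec dangling{{2, 2}, {1, 1}};

    return spec.window_dimensions.size() == 2 ? 0 : 1;
  }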
diff --git a/tensorflow/compiler/xla/tests/slice_test.cc b/tensorflow/compiler/xla/tests/slice_test.cc
index 69585ae39a..01252a0cf1 100644
--- a/tensorflow/compiler/xla/tests/slice_test.cc
+++ b/tensorflow/compiler/xla/tests/slice_test.cc
@@ -194,7 +194,7 @@ class SliceR1Test : public ClientLibraryTestBase,
protected:
template <typename NativeT>
void Run(const R1Spec& spec) {
- // This can't be an std::vector, since you can't grab an ArraySlice of a
+ // This can't be an std::vector, since you can't grab a Span of a
// vector<bool>.
absl::InlinedVector<NativeT, 1> input(spec.input_dim0);
std::iota(input.begin(), input.end(), NativeT());
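The comment in the slice test above notes that you cannot take a Span of a vector<bool>; the reason is that std::vector<bool> is a bit-packed specialization with no contiguous bool storage (and no data() member returning bool*). A short sketch of the workaround the test uses, assuming only the Abseil headers:

  #include <vector>
  #include "absl/container/inlined_vector.h"
  #include "absl/types/span.h"

  int main() {
    // Does not compile: std::vector<bool> exposes no bool* to view.
    // absl::Span<const bool> bad(std::vector<bool>{true, false});

    // absl::InlinedVector<bool, N> stores real bools, so a Span works.
    absl::InlinedVector<bool, 4> input = {true, false, true};
    absl::Span<const bool> view(input.data(), input.size());
    return (view[0] && !view[1]) ? 0 : 1;
  }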
diff --git a/tensorflow/compiler/xla/tests/test_utils.cc b/tensorflow/compiler/xla/tests/test_utils.cc
index 60ada58b2a..c20a7c8fe4 100644
--- a/tensorflow/compiler/xla/tests/test_utils.cc
+++ b/tensorflow/compiler/xla/tests/test_utils.cc
@@ -183,8 +183,8 @@ StatusOr<std::unique_ptr<Literal>> MakeFakeLiteralInternal(
break;
case PRED: {
std::uniform_int_distribution<int> generator(0, 1);
- TF_CHECK_OK(literal->Populate<bool>(
- [&](tensorflow::gtl::ArraySlice<int64> /*indices*/) {
+ TF_CHECK_OK(
+ literal->Populate<bool>([&](absl::Span<const int64> /*indices*/) {
return generator(*engine);
}));
break;
@@ -236,8 +236,8 @@ bool NeedsInitValue(const HloUse& use) {
// Generate random values that are constrained to the input_shape minus the
// output_shape so as not to produce wrapping slices, for instance.
-std::unique_ptr<Literal> MakeRandomIndex(
- tensorflow::gtl::ArraySlice<int64> index_space, std::minstd_rand0* engine) {
+std::unique_ptr<Literal> MakeRandomIndex(absl::Span<const int64> index_space,
+ std::minstd_rand0* engine) {
std::vector<int32> start_indices(index_space.size());
if (engine != nullptr) {
for (int i = 0; i < index_space.size(); ++i) {
@@ -294,7 +294,7 @@ std::vector<HloInstruction*> FindConstrainedUses(
// generate a constrained literal (either bounded in the case of indices, or
// zero in the case of init_values for reductions).
StatusOr<std::unique_ptr<Literal>> CreateLiteralForConstrainedUses(
- const tensorflow::gtl::ArraySlice<HloInstruction*> constrained_uses,
+ const absl::Span<HloInstruction* const> constrained_uses,
const HloInstruction& param, std::minstd_rand0* engine) {
std::vector<int64> index_space;
bool no_duplicates = false;
diff --git a/tensorflow/compiler/xla/tests/tuple_test.cc b/tensorflow/compiler/xla/tests/tuple_test.cc
index c101cd2d20..f2b3b49015 100644
--- a/tensorflow/compiler/xla/tests/tuple_test.cc
+++ b/tensorflow/compiler/xla/tests/tuple_test.cc
@@ -507,7 +507,7 @@ XLA_TEST_F(TupleTest, ComplexTuples) {
{{10011, 20022}, {30031, 40042}}});
auto prod = absl::make_unique<Literal>(sum->shape());
ASSERT_TRUE(prod->Populate<complex64>(
- [&sum](tensorflow::gtl::ArraySlice<int64> indexes) {
+ [&sum](absl::Span<const int64> indexes) {
return sum->Get<complex64>(indexes) *
(indexes[indexes.size() - 1] == 0
? complex64(1, 2)
diff --git a/tensorflow/compiler/xla/tests/xla_hlo_profile_test.cc b/tensorflow/compiler/xla/tests/xla_hlo_profile_test.cc
index e3a0625780..7fd42944de 100644
--- a/tensorflow/compiler/xla/tests/xla_hlo_profile_test.cc
+++ b/tensorflow/compiler/xla/tests/xla_hlo_profile_test.cc
@@ -84,7 +84,7 @@ struct ParsedProfileOutputLine {
Status ParseOneProfileOutputLine(
const string& line, bool expect_hlo,
gtl::FlatMap<string, ParsedProfileOutputLine>* parsed_results,
- tensorflow::gtl::ArraySlice<absl::string_view> opcodes_to_ignore = {}) {
+ absl::Span<const absl::string_view> opcodes_to_ignore = {}) {
string separator = "[^:]*:: +";
string match_percentage = R"(\d+\.\d*% +\d+Σ)";
string match_cycles = R"((\d+) cycles +\( *()" + match_percentage + R"()\))";
diff --git a/tensorflow/compiler/xla/text_literal_writer.cc b/tensorflow/compiler/xla/text_literal_writer.cc
index 00147015a6..395e3b69ac 100644
--- a/tensorflow/compiler/xla/text_literal_writer.cc
+++ b/tensorflow/compiler/xla/text_literal_writer.cc
@@ -46,8 +46,7 @@ namespace xla {
Status status;
tensorflow::WritableFile* f_ptr = f.get();
literal.EachCellAsString(
- [f_ptr, &status](tensorflow::gtl::ArraySlice<int64> indices,
- const string& value) {
+ [f_ptr, &status](absl::Span<const int64> indices, const string& value) {
if (!status.ok()) {
return;
}
diff --git a/tensorflow/compiler/xla/tools/dumped_computation_to_graphviz.cc b/tensorflow/compiler/xla/tools/dumped_computation_to_graphviz.cc
index d15b71b792..e60b32cb54 100644
--- a/tensorflow/compiler/xla/tools/dumped_computation_to_graphviz.cc
+++ b/tensorflow/compiler/xla/tools/dumped_computation_to_graphviz.cc
@@ -46,7 +46,7 @@ limitations under the License.
namespace xla {
namespace tools {
-void RealMain(tensorflow::gtl::ArraySlice<char*> args) {
+void RealMain(absl::Span<char* const> args) {
Client* client = ClientLibrary::LocalClientOrDie();
for (char* arg : args) {
HloSnapshot module;
@@ -77,7 +77,7 @@ int main(int argc, char** argv) {
}
tensorflow::port::InitMain(argv[0], &argc, &argv);
- tensorflow::gtl::ArraySlice<char*> args(argv, argc);
+ absl::Span<char* const> args(argv, argc);
args.remove_prefix(1); // Pop off the binary name, argv[0]
xla::tools::RealMain(args);
return 0;
diff --git a/tensorflow/compiler/xla/tools/dumped_computation_to_operation_list.cc b/tensorflow/compiler/xla/tools/dumped_computation_to_operation_list.cc
index c446b27a04..b9073a3f43 100644
--- a/tensorflow/compiler/xla/tools/dumped_computation_to_operation_list.cc
+++ b/tensorflow/compiler/xla/tools/dumped_computation_to_operation_list.cc
@@ -59,7 +59,7 @@ class OperationDumper : public DfsHloVisitorWithDefault {
string path_;
};
-void RealMain(tensorflow::gtl::ArraySlice<char*> args) {
+void RealMain(absl::Span<char* const> args) {
LocalClient* client = ClientLibrary::LocalClientOrDie();
LocalService* local_service =
ClientLibrary::GetXlaService(client->platform());
@@ -104,7 +104,7 @@ void RealMain(tensorflow::gtl::ArraySlice<char*> args) {
int main(int argc, char** argv) {
tensorflow::port::InitMain(argv[0], &argc, &argv);
- tensorflow::gtl::ArraySlice<char*> args(argv, argc);
+ absl::Span<char* const> args(argv, argc);
args.remove_prefix(1); // Pop off the binary name, argv[0]
xla::tools::RealMain(args);
return 0;
diff --git a/tensorflow/compiler/xla/tools/dumped_computation_to_text.cc b/tensorflow/compiler/xla/tools/dumped_computation_to_text.cc
index d86a4474b3..959fc8311b 100644
--- a/tensorflow/compiler/xla/tools/dumped_computation_to_text.cc
+++ b/tensorflow/compiler/xla/tools/dumped_computation_to_text.cc
@@ -34,7 +34,7 @@ limitations under the License.
namespace xla {
namespace tools {
-void RealMain(tensorflow::gtl::ArraySlice<char*> args, bool compile) {
+void RealMain(absl::Span<char* const> args, bool compile) {
LocalClient* client = ClientLibrary::LocalClientOrDie();
LocalService* local_service =
ClientLibrary::GetXlaService(client->platform());
@@ -102,7 +102,7 @@ int main(int argc, char** argv) {
tensorflow::port::InitMain(usage.c_str(), &argc, &argv);
QCHECK(argc > 1) << "\nERROR: must specify at least one module\n" << usage;
- tensorflow::gtl::ArraySlice<char*> args(argv, argc);
+ absl::Span<char* const> args(argv, argc);
args.remove_prefix(1); // Pop off the binary name, argv[0]
xla::tools::RealMain(args, compile);
return 0;
diff --git a/tensorflow/compiler/xla/tools/dumped_computation_to_tf_graphdef.cc b/tensorflow/compiler/xla/tools/dumped_computation_to_tf_graphdef.cc
index bd8b89542f..ef4d09a04b 100644
--- a/tensorflow/compiler/xla/tools/dumped_computation_to_tf_graphdef.cc
+++ b/tensorflow/compiler/xla/tools/dumped_computation_to_tf_graphdef.cc
@@ -45,7 +45,7 @@ using tensorflow::Env;
namespace xla {
namespace tools {
-void RealMain(tensorflow::gtl::ArraySlice<char*> args) {
+void RealMain(absl::Span<char* const> args) {
Client* client = ClientLibrary::LocalClientOrDie();
for (char* arg : args) {
HloSnapshot module;
@@ -78,7 +78,7 @@ int main(int argc, char** argv) {
tensorflow::port::InitMain(argv[0], &argc, &argv);
- tensorflow::gtl::ArraySlice<char*> args(argv, argc);
+ absl::Span<char* const> args(argv, argc);
args.remove_prefix(1); // Pop off the binary name, argv[0]
xla::tools::RealMain(args);
return 0;
diff --git a/tensorflow/compiler/xla/tools/replay_computation.cc b/tensorflow/compiler/xla/tools/replay_computation.cc
index e826d6fa93..2695e2e4b1 100644
--- a/tensorflow/compiler/xla/tools/replay_computation.cc
+++ b/tensorflow/compiler/xla/tools/replay_computation.cc
@@ -253,7 +253,7 @@ StatusOr<HloSnapshot> ParseInputFile(const string& filename,
return InvalidArgument("Could not parse %s.", filename);
}
-int RealMain(tensorflow::gtl::ArraySlice<char*> args, const Options& opts) {
+int RealMain(absl::Span<char* const> args, const Options& opts) {
LocalClient* client = ClientLibrary::LocalClientOrDie();
int exit_status = EXIT_SUCCESS;
@@ -344,7 +344,7 @@ int main(int argc, char** argv) {
LOG(QFATAL) << usage;
}
- tensorflow::gtl::ArraySlice<char*> args(argv, argc);
+ absl::Span<char* const> args(argv, argc);
args.remove_prefix(1); // Pop off the binary name, argv[0]
return xla::tools::RealMain(args, opts);
}
diff --git a/tensorflow/compiler/xla/tools/show_signature.cc b/tensorflow/compiler/xla/tools/show_signature.cc
index 10e7202acf..f7eb4a79ca 100644
--- a/tensorflow/compiler/xla/tools/show_signature.cc
+++ b/tensorflow/compiler/xla/tools/show_signature.cc
@@ -45,7 +45,7 @@ limitations under the License.
namespace xla {
namespace tools {
-void RealMain(tensorflow::gtl::ArraySlice<char*> args) {
+void RealMain(absl::Span<char* const> args) {
Client* client = ClientLibrary::LocalClientOrDie();
for (char* arg : args) {
HloSnapshot module;
@@ -66,7 +66,7 @@ void RealMain(tensorflow::gtl::ArraySlice<char*> args) {
int main(int argc, char** argv) {
tensorflow::port::InitMain(argv[0], &argc, &argv);
- tensorflow::gtl::ArraySlice<char*> args(argv, argc);
+ absl::Span<char* const> args(argv, argc);
args.remove_prefix(1); // Pop off the binary name, argv[0]
xla::tools::RealMain(args);
return 0;
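The command-line tools above all wrap argv the same way. A standalone sketch of that pattern, assuming only absl/types/span.h; PrintArgs is an illustrative name. Note that the const qualifies the char* elements, not the characters they point to:

  #include <cstdio>
  #include "absl/types/span.h"

  // The elements (the char* pointers) are const; the strings themselves are
  // not, matching the RealMain(absl::Span<char* const>) signatures above.
  void PrintArgs(absl::Span<char* const> args) {
    for (char* arg : args) {
      std::printf("%s\n", arg);
    }
  }

  int main(int argc, char** argv) {
    absl::Span<char* const> args(argv, argc);
    args.remove_prefix(1);  // Pop off the binary name, argv[0].
    PrintArgs(args);
    return 0;
  }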
diff --git a/tensorflow/compiler/xla/util.cc b/tensorflow/compiler/xla/util.cc
index 0f607a0c8a..68cab7387c 100644
--- a/tensorflow/compiler/xla/util.cc
+++ b/tensorflow/compiler/xla/util.cc
@@ -76,7 +76,7 @@ string Reindent(absl::string_view original,
});
}
-bool IsPermutation(tensorflow::gtl::ArraySlice<int64> permutation, int64 rank) {
+bool IsPermutation(absl::Span<const int64> permutation, int64 rank) {
if (rank != permutation.size()) {
return false;
}
@@ -90,7 +90,7 @@ bool IsPermutation(tensorflow::gtl::ArraySlice<int64> permutation, int64 rank) {
}
std::vector<int64> InversePermutation(
- tensorflow::gtl::ArraySlice<int64> input_permutation) {
+ absl::Span<const int64> input_permutation) {
DCHECK(IsPermutation(input_permutation, input_permutation.size()));
std::vector<int64> output_permutation(input_permutation.size(), -1);
for (size_t i = 0; i < input_permutation.size(); ++i) {
@@ -99,8 +99,8 @@ std::vector<int64> InversePermutation(
return output_permutation;
}
-std::vector<int64> ComposePermutations(tensorflow::gtl::ArraySlice<int64> p1,
- tensorflow::gtl::ArraySlice<int64> p2) {
+std::vector<int64> ComposePermutations(absl::Span<const int64> p1,
+ absl::Span<const int64> p2) {
CHECK_EQ(p1.size(), p2.size());
std::vector<int64> output;
for (size_t i = 0; i < p1.size(); ++i) {
@@ -109,7 +109,7 @@ std::vector<int64> ComposePermutations(tensorflow::gtl::ArraySlice<int64> p1,
return output;
}
-bool IsIdentityPermutation(tensorflow::gtl::ArraySlice<int64> permutation) {
+bool IsIdentityPermutation(absl::Span<const int64> permutation) {
for (int64 i = 0; i < permutation.size(); ++i) {
if (permutation[i] != i) {
return false;
@@ -130,7 +130,7 @@ PaddingConfig MakeNoPaddingConfig(int64 rank) {
}
PaddingConfig MakeEdgePaddingConfig(
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding) {
+ absl::Span<const std::pair<int64, int64>> padding) {
PaddingConfig padding_config;
for (const std::pair<int64, int64>& dim : padding) {
auto dimension = padding_config.add_dimensions();
@@ -207,14 +207,13 @@ void LogLines(int sev, absl::string_view text, const char* fname, int lineno) {
}
}
-int64 Product(tensorflow::gtl::ArraySlice<int64> xs) {
+int64 Product(absl::Span<const int64> xs) {
return std::accumulate(xs.begin(), xs.end(), static_cast<int64>(1),
std::multiplies<int64>());
}
-std::vector<std::pair<int64, int64>> CommonFactors(
- tensorflow::gtl::ArraySlice<int64> a,
- tensorflow::gtl::ArraySlice<int64> b) {
+std::vector<std::pair<int64, int64>> CommonFactors(absl::Span<const int64> a,
+ absl::Span<const int64> b) {
CHECK_EQ(Product(a), Product(b));
if (0 == Product(a)) {
return {std::make_pair(0, 0), std::make_pair(a.size(), b.size())};
diff --git a/tensorflow/compiler/xla/util.h b/tensorflow/compiler/xla/util.h
index c8b48c5ab4..6f7d97e30e 100644
--- a/tensorflow/compiler/xla/util.h
+++ b/tensorflow/compiler/xla/util.h
@@ -101,74 +101,99 @@ struct ScopedLoggingTimer {
uint64 start_micros;
};
-// Given a vector<T>, returns a MutableArraySlice<char> that points at its
+// Given a vector<T>, returns a Span<uint8> that points at its
// internals.
//
// Warning: if the vector is updated its storage pointer may change, so use this
// with caution (ideally in limited scopes with temporary lifetimes).
template <typename T>
-tensorflow::gtl::MutableArraySlice<uint8> MutableByteSlice(std::vector<T>* v) {
- return tensorflow::gtl::MutableArraySlice<uint8>(
- reinterpret_cast<uint8*>(v->data()), v->size() * sizeof(T));
+absl::Span<uint8> MutableByteSlice(std::vector<T>* v) {
+ return absl::Span<uint8>(reinterpret_cast<uint8*>(v->data()),
+ v->size() * sizeof(T));
}
// Turns an immutable slice of type T into an immutable slice of bytes with the
// same byte size.
template <typename T>
-tensorflow::gtl::ArraySlice<uint8> CastToByteSlice(
- tensorflow::gtl::ArraySlice<T> slice) {
- return tensorflow::gtl::ArraySlice<uint8>(
- reinterpret_cast<const uint8*>(slice.data()), slice.size() * sizeof(T));
+absl::Span<const uint8> CastToByteSlice(absl::Span<const T> slice) {
+ return absl::Span<const uint8>(reinterpret_cast<const uint8*>(slice.data()),
+ slice.size() * sizeof(T));
}
// Casts a byte slice to a non-byte type T, checking that the original slice
// length is a multiple of sizeof(T).
template <typename T>
-tensorflow::gtl::ArraySlice<T> CastByteSlice(
- tensorflow::gtl::ArraySlice<uint8> slice) {
+absl::Span<const T> CastByteSlice(absl::Span<const uint8> slice) {
CHECK_EQ(0, slice.size() % sizeof(T));
- return tensorflow::gtl::ArraySlice<T>(
- reinterpret_cast<const T*>(slice.data()), slice.size() / sizeof(T));
+ return absl::Span<const T>(reinterpret_cast<const T*>(slice.data()),
+ slice.size() / sizeof(T));
}
// Convenience function to force a vector to convert to an immutable slice.
template <typename T>
-tensorflow::gtl::ArraySlice<T> AsSlice(const std::vector<T>& v) {
- return tensorflow::gtl::ArraySlice<T>(v);
+absl::Span<const T> AsSlice(const std::vector<T>& v) {
+ return absl::Span<const T>(v);
}
-// Converts a mutable vector pointer into a MutableArraySlice of the same
+// Converts a mutable vector pointer into a Span of the same
// type.
template <typename T>
-tensorflow::gtl::MutableArraySlice<T> AsMutableSlice(std::vector<T>* v) {
- return tensorflow::gtl::MutableArraySlice<T>(v->data(), v->size());
+absl::Span<T> AsMutableSlice(std::vector<T>* v) {
+ return absl::Span<T>(v->data(), v->size());
}
// xla::int64 is not the same type as tensorflow::protobuf_int64 in open-source.
// Wrapper function that gives an int64 array slice view of a repeated int64
// protobuf field.
-static inline tensorflow::gtl::ArraySlice<int64> AsInt64Slice(
+static inline absl::Span<const int64> AsInt64Slice(
const tensorflow::protobuf::RepeatedField<tensorflow::protobuf_int64>& v) {
- tensorflow::gtl::ArraySlice<tensorflow::protobuf_int64> slice(v);
- return tensorflow::gtl::ArraySlice<int64>(
- reinterpret_cast<const int64*>(slice.data()), slice.size());
+ absl::Span<const tensorflow::protobuf_int64> slice(v);
+ return absl::Span<const int64>(reinterpret_cast<const int64*>(slice.data()),
+ slice.size());
}
// As above, but for uint64 types.
-static inline tensorflow::gtl::ArraySlice<uint64> AsUInt64Slice(
+static inline absl::Span<const uint64> AsUInt64Slice(
const tensorflow::protobuf::RepeatedField<tensorflow::protobuf_uint64>& v) {
- tensorflow::gtl::ArraySlice<tensorflow::protobuf_uint64> slice(v);
- return tensorflow::gtl::ArraySlice<uint64>(
- reinterpret_cast<const uint64*>(slice.data()), slice.size());
+ absl::Span<const tensorflow::protobuf_uint64> slice(v);
+ return absl::Span<const uint64>(reinterpret_cast<const uint64*>(slice.data()),
+ slice.size());
+}
+
+// Compares two containers for equality. Returns true iff the two containers
+// have the same size and all their elements compare equal using their
+// operator==. Like std::equal, but forces size equality.
+template <typename Container1T, typename Container2T>
+bool ContainersEqual(const Container1T& c1, const Container2T& c2) {
+ return ((c1.size() == c2.size()) &&
+ std::equal(std::begin(c1), std::end(c1), std::begin(c2)));
+}
+
+template <typename Container1T,
+ typename ElementType = typename Container1T::value_type>
+bool ContainersEqual(const Container1T& c1,
+ std::initializer_list<ElementType> il) {
+ absl::Span<const ElementType> c2{il};
+ return ContainersEqual(c1, c2);
+}
+
+// Compares two containers for equality. Returns true iff the two containers
+// have the same size and all their elements compare equal using the predicate
+// p. Like std::equal, but forces size equality.
+template <typename Container1T, typename Container2T, class PredicateT>
+bool ContainersEqual(const Container1T& c1, const Container2T& c2,
+ PredicateT p) {
+ return ((c1.size() == c2.size()) &&
+ std::equal(std::begin(c1), std::end(c1), std::begin(c2), p));
}
// Performs a copy of count values from src to dest, using different strides for
// source and destination. The source starting index is src_base, while the
// destination one is dest_base.
template <typename D, typename S>
-void StridedCopy(tensorflow::gtl::MutableArraySlice<D> dest, int64 dest_base,
- int64 dest_stride, tensorflow::gtl::ArraySlice<S> src,
- int64 src_base, int64 src_stride, int64 count) {
+void StridedCopy(absl::Span<D> dest, int64 dest_base, int64 dest_stride,
+ absl::Span<const S> src, int64 src_base, int64 src_stride,
+ int64 count) {
for (; count > 0; --count, dest_base += dest_stride, src_base += src_stride) {
dest[dest_base] = static_cast<D>(src[src_base]);
}
@@ -258,7 +283,7 @@ Status ResourceExhaustedStrCat(Args&&... concat) {
string Reindent(absl::string_view original, absl::string_view indentation);
// Checks whether permutation is a permutation of the [0, rank) integer range.
-bool IsPermutation(tensorflow::gtl::ArraySlice<int64> permutation, int64 rank);
+bool IsPermutation(absl::Span<const int64> permutation, int64 rank);
// Applies `permutation` on `input` and returns the permuted array.
// For each i, output[permutation[i]] = input[i].
@@ -268,9 +293,9 @@ bool IsPermutation(tensorflow::gtl::ArraySlice<int64> permutation, int64 rank);
// 2. permutation.size() == input.size().
template <typename Container>
std::vector<typename Container::value_type> Permute(
- tensorflow::gtl::ArraySlice<int64> permutation, const Container& input) {
+ absl::Span<const int64> permutation, const Container& input) {
using T = typename Container::value_type;
- tensorflow::gtl::ArraySlice<T> data(input);
+ absl::Span<const T> data(input);
CHECK(IsPermutation(permutation, data.size()));
std::vector<T> output(data.size());
for (size_t i = 0; i < permutation.size(); ++i) {
@@ -281,14 +306,14 @@ std::vector<typename Container::value_type> Permute(
// Inverts a permutation, i.e., output_permutation[input_permutation[i]] = i.
std::vector<int64> InversePermutation(
- tensorflow::gtl::ArraySlice<int64> input_permutation);
+ absl::Span<const int64> input_permutation);
// Composes two permutations: output[i] = p1[p2[i]].
-std::vector<int64> ComposePermutations(tensorflow::gtl::ArraySlice<int64> p1,
- tensorflow::gtl::ArraySlice<int64> p2);
+std::vector<int64> ComposePermutations(absl::Span<const int64> p1,
+ absl::Span<const int64> p2);
// Returns true iff permutation == {0, 1, 2, ...}.
-bool IsIdentityPermutation(tensorflow::gtl::ArraySlice<int64> permutation);
+bool IsIdentityPermutation(absl::Span<const int64> permutation);
template <typename Container>
int64 PositionInContainer(const Container& container, int64 value) {
@@ -342,7 +367,7 @@ PaddingConfig MakeNoPaddingConfig(int64 rank);
// Returns a PaddingConfig object where 'padding' contains
// (low edge padding, high edge padding) pairs for each dimension.
PaddingConfig MakeEdgePaddingConfig(
- tensorflow::gtl::ArraySlice<std::pair<int64, int64>> padding);
+ absl::Span<const std::pair<int64, int64>> padding);
// Returns true if the padding configuration has at least one dimension with
// non-zero interior padding.
@@ -409,7 +434,7 @@ std::unique_ptr<Derived> unique_ptr_static_cast(std::unique_ptr<Base> ptr) {
return std::unique_ptr<Derived>(static_cast<Derived*>(ptr.release()));
}
-int64 Product(tensorflow::gtl::ArraySlice<int64> xs);
+int64 Product(absl::Span<const int64> xs);
// Returns the start indices of consecutive non-overlapping subsequences of `a`
// and `b` with the same product, i.e. `(i, j)` so
@@ -422,8 +447,8 @@ int64 Product(tensorflow::gtl::ArraySlice<int64> xs);
//
// If the given shapes have non-zero size, returns the bounds of the shortest
// possible such subsequences; else, returns `{(0, 0), (a.size, b.size)}`.
-std::vector<std::pair<int64, int64>> CommonFactors(
- tensorflow::gtl::ArraySlice<int64> a, tensorflow::gtl::ArraySlice<int64> b);
+std::vector<std::pair<int64, int64>> CommonFactors(absl::Span<const int64> a,
+ absl::Span<const int64> b);
// Removes illegal characters from filenames.
string SanitizeFileName(string file_name);
@@ -445,7 +470,7 @@ void EraseAt(C* c, int64 index) {
}
template <typename T>
-std::vector<T> ArraySliceToVector(tensorflow::gtl::ArraySlice<T> slice) {
+std::vector<T> ArraySliceToVector(absl::Span<const T> slice) {
return std::vector<T>(slice.begin(), slice.end());
}
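A usage sketch for two of the helpers touched in the util.h hunk above, assuming they are visible via tensorflow/compiler/xla/util.h as declared there; the surrounding driver code and build setup are illustrative only:

  #include <vector>
  #include "absl/types/span.h"
  #include "tensorflow/compiler/xla/util.h"

  int main() {
    // ContainersEqual forces size equality before comparing elements, unlike
    // a bare std::equal over the first range.
    std::vector<int> a = {1, 2, 3};
    bool same = xla::ContainersEqual(a, {1, 2, 3});  // true
    bool shorter = xla::ContainersEqual(a, {1, 2});  // false: sizes differ

    // StridedCopy reads every other element of src into consecutive slots of
    // dest, using the Span-typed signature shown above.
    std::vector<float> src = {0.f, 1.f, 2.f, 3.f, 4.f, 5.f};
    std::vector<float> dest(3, 0.f);
    xla::StridedCopy(absl::Span<float>(dest), /*dest_base=*/0,
                     /*dest_stride=*/1, absl::Span<const float>(src),
                     /*src_base=*/0, /*src_stride=*/2, /*count=*/3);
    return (same && !shorter && dest[2] == 4.f) ? 0 : 1;
  }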
diff --git a/tensorflow/compiler/xla/window_util.cc b/tensorflow/compiler/xla/window_util.cc
index 268dc5db01..83735d239f 100644
--- a/tensorflow/compiler/xla/window_util.cc
+++ b/tensorflow/compiler/xla/window_util.cc
@@ -24,7 +24,7 @@ limitations under the License.
namespace xla {
namespace window_util {
-Window MakeWindow(tensorflow::gtl::ArraySlice<int64> sizes) {
+Window MakeWindow(absl::Span<const int64> sizes) {
Window window;
for (int64 size : sizes) {
auto* dimension = window.add_dimensions();
@@ -36,7 +36,7 @@ Window MakeWindow(tensorflow::gtl::ArraySlice<int64> sizes) {
return window;
}
-PaddingConfig MakeSymmetricPadding(tensorflow::gtl::ArraySlice<int64> sizes) {
+PaddingConfig MakeSymmetricPadding(absl::Span<const int64> sizes) {
PaddingConfig config;
for (int64 size : sizes) {
auto* dimension = config.add_dimensions();
diff --git a/tensorflow/compiler/xla/window_util.h b/tensorflow/compiler/xla/window_util.h
index ba473e2c8c..ed93ecc30d 100644
--- a/tensorflow/compiler/xla/window_util.h
+++ b/tensorflow/compiler/xla/window_util.h
@@ -25,13 +25,13 @@ namespace window_util {
// Creates a window with the given sizes in the dimensions and all strides set
// to 1.
-Window MakeWindow(tensorflow::gtl::ArraySlice<int64> sizes);
+Window MakeWindow(absl::Span<const int64> sizes);
// Creates a padding config with symmetrical padding in each dimension, of value
// given by sizes; e.g. {0, 1, 2} would create a R3 padding config that had zero
// pixels of padding in dimension 0, one pixel of padding symmetrically, on each
// side of dimension 1, and two pixels of padding symmetrically on dimension 2.
-PaddingConfig MakeSymmetricPadding(tensorflow::gtl::ArraySlice<int64> sizes);
+PaddingConfig MakeSymmetricPadding(absl::Span<const int64> sizes);
string ToString(const WindowDimension& dim);
string ToString(const Window& window);