author     A. Unique TensorFlower <gardener@tensorflow.org>  2018-08-17 22:16:36 -0700
committer  TensorFlower Gardener <gardener@tensorflow.org>   2018-08-17 22:20:29 -0700
commit     39b17f9d56d6b9a02a30bfa24ac9e15ab37ca761 (patch)
tree       f6ded73c971ab178bde3f1c319e6ce867e95ae29 /tensorflow/compiler/xla/service
parent     4a41f50648929197954d892559587cb76458d306 (diff)
Automated rollback of commit 4a41f50648929197954d892559587cb76458d306
PiperOrigin-RevId: 209248552
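
This rollback reverts XLA from the absl::c_* container algorithms back to the project's local c_* helpers: each call site drops the absl:: prefix, and the @com_google_absl//absl/algorithm:container dependency is removed from the affected BUILD targets. Both spellings are thin range adapters over <algorithm>, so behavior is unchanged. A minimal sketch of the wrapper shape, assuming the local helpers forward to std:: the way the absl ones do (a hypothetical reconstruction, not the actual XLA source):

#include <algorithm>
#include <utility>
#include <vector>

// Hypothetical reconstruction of the wrapper shape shared by absl::c_all_of
// and XLA's local c_all_of; illustrative only, not copied from either codebase.
template <typename Container, typename Predicate>
bool c_all_of(const Container& container, Predicate&& predicate) {
  return std::all_of(container.begin(), container.end(),
                     std::forward<Predicate>(predicate));
}

int main() {
  std::vector<int> strides = {1, 1, 1};
  // Mirrors the is_unstrided_slice check reverted in
  // algebraic_simplifier.cc below.
  return c_all_of(strides, [](int s) { return s == 1; }) ? 0 : 1;
}
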
Diffstat (limited to 'tensorflow/compiler/xla/service')
-rw-r--r--  tensorflow/compiler/xla/service/BUILD  20
-rw-r--r--  tensorflow/compiler/xla/service/algebraic_simplifier.cc  8
-rw-r--r--  tensorflow/compiler/xla/service/batch_dot_simplification.cc  9
-rw-r--r--  tensorflow/compiler/xla/service/cpu/BUILD  1
-rw-r--r--  tensorflow/compiler/xla/service/cpu/vector_support_library.cc  5
-rw-r--r--  tensorflow/compiler/xla/service/elemental_ir_emitter.cc  5
-rw-r--r--  tensorflow/compiler/xla/service/gather_expander.cc  9
-rw-r--r--  tensorflow/compiler/xla/service/gpu/BUILD  3
-rw-r--r--  tensorflow/compiler/xla/service/gpu/fusion_merger.cc  22
-rw-r--r--  tensorflow/compiler/xla/service/gpu/ir_emitter.cc  3
-rw-r--r--  tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc  19
-rw-r--r--  tensorflow/compiler/xla/service/gpu/multi_output_fusion.cc  7
-rw-r--r--  tensorflow/compiler/xla/service/hlo_computation.cc  7
-rw-r--r--  tensorflow/compiler/xla/service/hlo_creation_utils.cc  17
-rw-r--r--  tensorflow/compiler/xla/service/hlo_evaluator.cc  19
-rw-r--r--  tensorflow/compiler/xla/service/hlo_evaluator_typed_visitor.h  11
-rw-r--r--  tensorflow/compiler/xla/service/hlo_execution_profile.cc  11
-rw-r--r--  tensorflow/compiler/xla/service/hlo_instruction.cc  3
-rw-r--r--  tensorflow/compiler/xla/service/hlo_instructions.cc  3
-rw-r--r--  tensorflow/compiler/xla/service/hlo_module.cc  7
-rw-r--r--  tensorflow/compiler/xla/service/hlo_parser.cc  14
-rw-r--r--  tensorflow/compiler/xla/service/indexed_array_analysis.cc  66
-rw-r--r--  tensorflow/compiler/xla/service/instruction_fusion.cc  3
-rw-r--r--  tensorflow/compiler/xla/service/llvm_ir/BUILD  1
-rw-r--r--  tensorflow/compiler/xla/service/llvm_ir/ir_array.h  3
-rw-r--r--  tensorflow/compiler/xla/service/reshape_mover.cc  4
-rw-r--r--  tensorflow/compiler/xla/service/scatter_expander.cc  3
-rw-r--r--  tensorflow/compiler/xla/service/shape_inference.cc  39
-rw-r--r--  tensorflow/compiler/xla/service/while_loop_constant_sinking.cc  11
-rw-r--r--  tensorflow/compiler/xla/service/while_loop_invariant_code_motion.cc  15
-rw-r--r--  tensorflow/compiler/xla/service/while_util.cc  8
-rw-r--r--  tensorflow/compiler/xla/service/while_util_test.cc  3
32 files changed, 150 insertions(+), 209 deletions(-)
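
The same adapter shape covers the other helpers that recur in the hunks below, chiefly c_binary_search over sorted dimension-number lists and c_copy_if for filtering instruction streams; the hunks themselves are otherwise mechanical renames plus lambda re-indentation. Again a hedged sketch under the same forwarding assumption (names mirror the call sites, bodies are illustrative):

#include <algorithm>
#include <iterator>
#include <utility>
#include <vector>

// Hypothetical: membership test on a sorted range, as used for offset_dims,
// collapsed_slice_dims, and similar gather/scatter dimension lists.
template <typename Container, typename T>
bool c_binary_search(const Container& container, T&& value) {
  return std::binary_search(container.begin(), container.end(),
                            std::forward<T>(value));
}

// Hypothetical: filtered copy into an output iterator, as used to collect
// kDot, kWhile, or gather instructions from a computation.
template <typename Container, typename OutputIterator, typename Predicate>
OutputIterator c_copy_if(const Container& container, OutputIterator out,
                         Predicate&& predicate) {
  return std::copy_if(container.begin(), container.end(), out,
                      std::forward<Predicate>(predicate));
}

int main() {
  std::vector<int> dims = {0, 2, 3};  // sorted, like gather dimension numbers
  bool has_dim2 = c_binary_search(dims, 2);
  std::vector<int> evens;
  c_copy_if(dims, std::back_inserter(evens), [](int d) { return d % 2 == 0; });
  return (has_dim2 && evens.size() == 2) ? 0 : 1;
}
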
diff --git a/tensorflow/compiler/xla/service/BUILD b/tensorflow/compiler/xla/service/BUILD
index 12ec38736e..a65bdebf51 100644
--- a/tensorflow/compiler/xla/service/BUILD
+++ b/tensorflow/compiler/xla/service/BUILD
@@ -175,7 +175,6 @@ cc_library(
"//tensorflow/compiler/xla:window_util",
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/core:lib",
- "@com_google_absl//absl/algorithm:container",
],
)
@@ -238,7 +237,6 @@ cc_library(
"//tensorflow/compiler/xla:window_util",
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/core:lib",
- "@com_google_absl//absl/algorithm:container",
],
)
@@ -313,7 +311,6 @@ cc_library(
"//tensorflow/core:human_readable_json",
"//tensorflow/core:lib",
"//tensorflow/core:lib_internal",
- "@com_google_absl//absl/algorithm:container",
],
)
@@ -1145,7 +1142,6 @@ cc_library(
":hlo_pass",
"//tensorflow/compiler/xla:util",
"//tensorflow/core:lib",
- "@com_google_absl//absl/algorithm:container",
],
)
@@ -1185,7 +1181,6 @@ cc_library(
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:util",
- "@com_google_absl//absl/algorithm:container",
],
)
@@ -1236,7 +1231,6 @@ cc_library(
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:util",
- "@com_google_absl//absl/algorithm:container",
],
)
@@ -1251,7 +1245,6 @@ cc_library(
":while_util",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:statusor",
- "@com_google_absl//absl/algorithm:container",
],
)
@@ -1296,7 +1289,6 @@ cc_library(
"//tensorflow/compiler/xla:window_util",
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/core:lib",
- "@com_google_absl//absl/algorithm:container",
],
)
@@ -1331,7 +1323,8 @@ cc_library(
":hlo",
":hlo_creation_utils",
":hlo_pass",
- "@com_google_absl//absl/algorithm:container",
+ "//tensorflow/compiler/xla:xla_data_proto",
+ "//tensorflow/core:lib",
],
)
@@ -1589,7 +1582,6 @@ cc_library(
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:util",
"//tensorflow/core:lib",
- "@com_google_absl//absl/algorithm:container",
],
)
@@ -1752,7 +1744,6 @@ cc_library(
"//tensorflow/compiler/xla:util",
"//tensorflow/core:lib",
"//tensorflow/core:stream_executor_no_cuda",
- "@com_google_absl//absl/algorithm:container",
],
)
@@ -2574,7 +2565,6 @@ cc_library(
"//tensorflow/compiler/xla/service/llvm_ir:loop_emitter",
"//tensorflow/core:lib",
"//tensorflow/core:lib_internal",
- "@com_google_absl//absl/algorithm:container",
"@llvm//:core",
"@llvm//:transform_utils",
],
@@ -2937,7 +2927,6 @@ cc_library(
":tuple_util",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/core:lib",
- "@com_google_absl//absl/algorithm:container",
],
)
@@ -2951,7 +2940,6 @@ tf_cc_test(
"//tensorflow/compiler/xla/service:hlo_matchers",
"//tensorflow/compiler/xla/service:hlo_parser",
"//tensorflow/compiler/xla/tests:xla_internal_test_main",
- "@com_google_absl//absl/algorithm:container",
],
)
@@ -2967,7 +2955,6 @@ cc_library(
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:util",
"//tensorflow/core:lib",
- "@com_google_absl//absl/algorithm:container",
],
)
@@ -2995,7 +2982,6 @@ cc_library(
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:util",
"//tensorflow/core:lib",
- "@com_google_absl//absl/algorithm:container",
],
)
@@ -3050,7 +3036,6 @@ cc_library(
"//tensorflow/compiler/xla:util",
"//tensorflow/core:lib",
"//tensorflow/core:ptr_util",
- "@com_google_absl//absl/algorithm:container",
],
)
@@ -3084,7 +3069,6 @@ cc_library(
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/core:lib",
"//tensorflow/core:lib_internal",
- "@com_google_absl//absl/algorithm:container",
],
)
diff --git a/tensorflow/compiler/xla/service/algebraic_simplifier.cc b/tensorflow/compiler/xla/service/algebraic_simplifier.cc
index 2c539eb99a..f7812d9661 100644
--- a/tensorflow/compiler/xla/service/algebraic_simplifier.cc
+++ b/tensorflow/compiler/xla/service/algebraic_simplifier.cc
@@ -22,7 +22,6 @@ limitations under the License.
#include <utility>
#include <vector>
-#include "absl/algorithm/container.h"
#include "tensorflow/compiler/xla/layout_util.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
@@ -1753,8 +1752,8 @@ Status AlgebraicSimplifierVisitor::HandleSlice(HloInstruction* slice) {
}
auto is_unstrided_slice = [](const HloInstruction* hlo) {
- return absl::c_all_of(hlo->slice_strides(),
- [](int64 stride) { return stride == 1; });
+ return c_all_of(hlo->slice_strides(),
+ [](int64 stride) { return stride == 1; });
};
if (slice->operand(0)->opcode() == HloOpcode::kSlice &&
is_unstrided_slice(slice) && is_unstrided_slice(slice->operand(0))) {
@@ -1931,8 +1930,7 @@ Status AlgebraicSimplifierVisitor::HandleReduce(HloInstruction* reduce) {
// This should make fusion easier or use less memory bandwidth in the unfused
// case.
if (arg->opcode() == HloOpcode::kConcatenate &&
- absl::c_linear_search(reduce->dimensions(),
- arg->concatenate_dimension())) {
+ c_linear_search(reduce->dimensions(), arg->concatenate_dimension())) {
HloInstruction* old_reduce = nullptr;
for (HloInstruction* operand : arg->operands()) {
HloInstruction* new_reduce = computation_->AddInstruction(
diff --git a/tensorflow/compiler/xla/service/batch_dot_simplification.cc b/tensorflow/compiler/xla/service/batch_dot_simplification.cc
index b226e7ecb0..2099916509 100644
--- a/tensorflow/compiler/xla/service/batch_dot_simplification.cc
+++ b/tensorflow/compiler/xla/service/batch_dot_simplification.cc
@@ -15,7 +15,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/batch_dot_simplification.h"
-#include "absl/algorithm/container.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_creation_utils.h"
@@ -85,10 +84,10 @@ StatusOr<bool> BatchDotSimplification::Run(HloModule* module) {
bool changed = false;
std::vector<HloInstruction*> dot_instrs;
for (HloComputation* computation : module->MakeNonfusionComputations()) {
- absl::c_copy_if(computation->instructions(), std::back_inserter(dot_instrs),
- [](HloInstruction* instr) {
- return instr->opcode() == HloOpcode::kDot;
- });
+ c_copy_if(computation->instructions(), std::back_inserter(dot_instrs),
+ [](HloInstruction* instr) {
+ return instr->opcode() == HloOpcode::kDot;
+ });
}
for (HloInstruction* dot_instr : dot_instrs) {
TF_ASSIGN_OR_RETURN(bool elided_batch_dim_from_one,
diff --git a/tensorflow/compiler/xla/service/cpu/BUILD b/tensorflow/compiler/xla/service/cpu/BUILD
index 9cad674934..fe1ef78533 100644
--- a/tensorflow/compiler/xla/service/cpu/BUILD
+++ b/tensorflow/compiler/xla/service/cpu/BUILD
@@ -893,7 +893,6 @@ cc_library(
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/compiler/xla/service/llvm_ir:llvm_util",
"//tensorflow/core:lib",
- "@com_google_absl//absl/algorithm:container",
"@llvm//:core",
"@llvm//:support",
],
diff --git a/tensorflow/compiler/xla/service/cpu/vector_support_library.cc b/tensorflow/compiler/xla/service/cpu/vector_support_library.cc
index 962ea69c09..3274be8d9d 100644
--- a/tensorflow/compiler/xla/service/cpu/vector_support_library.cc
+++ b/tensorflow/compiler/xla/service/cpu/vector_support_library.cc
@@ -15,7 +15,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/cpu/vector_support_library.h"
-#include "absl/algorithm/container.h"
#include "llvm/Support/raw_ostream.h"
#include "tensorflow/compiler/xla/service/cpu/target_machine_features.h"
#include "tensorflow/compiler/xla/service/llvm_ir/llvm_util.h"
@@ -423,8 +422,8 @@ TileVariable::TileVariable(VectorSupportLibrary* vector_support,
std::vector<llvm::Value*> TileVariable::Get() const {
std::vector<llvm::Value*> result;
- absl::c_transform(storage_, std::back_inserter(result),
- [&](VectorVariable vect_var) { return vect_var.Get(); });
+ c_transform(storage_, std::back_inserter(result),
+ [&](VectorVariable vect_var) { return vect_var.Get(); });
return result;
}
diff --git a/tensorflow/compiler/xla/service/elemental_ir_emitter.cc b/tensorflow/compiler/xla/service/elemental_ir_emitter.cc
index 4b19aa5df9..891ae42141 100644
--- a/tensorflow/compiler/xla/service/elemental_ir_emitter.cc
+++ b/tensorflow/compiler/xla/service/elemental_ir_emitter.cc
@@ -21,7 +21,6 @@ limitations under the License.
#include <vector>
// IWYU pragma: no_include "llvm/IR/Intrinsics.gen.inc"
-#include "absl/algorithm/container.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
@@ -1673,7 +1672,7 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitElementalGather(
std::vector<int64> operand_to_output_dim(operand_shape.dimensions_size(), -1);
for (int64 i = 0, e = operand_shape.dimensions_size(), operand_index_dim = 0;
i < e; i++) {
- if (absl::c_binary_search(dim_numbers.collapsed_slice_dims(), i)) {
+ if (c_binary_search(dim_numbers.collapsed_slice_dims(), i)) {
operand_index.push_back(index.GetConstantWithIndexType(0));
} else {
int64 output_window_dim = dim_numbers.offset_dims(operand_index_dim++);
@@ -1687,7 +1686,7 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitElementalGather(
{
std::vector<llvm::Value*> gather_index_index_components;
for (int64 i = 0, e = output_shape.dimensions_size(); i < e; i++) {
- if (!absl::c_binary_search(dim_numbers.offset_dims(), i)) {
+ if (!c_binary_search(dim_numbers.offset_dims(), i)) {
gather_index_index.push_back(index[i]);
}
}
diff --git a/tensorflow/compiler/xla/service/gather_expander.cc b/tensorflow/compiler/xla/service/gather_expander.cc
index d889fd8e88..9370c88710 100644
--- a/tensorflow/compiler/xla/service/gather_expander.cc
+++ b/tensorflow/compiler/xla/service/gather_expander.cc
@@ -15,7 +15,6 @@ limitations under the License.
#include <utility>
-#include "absl/algorithm/container.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/gather_expander.h"
#include "tensorflow/compiler/xla/service/hlo_creation_utils.h"
@@ -231,7 +230,7 @@ static StatusOr<HloInstruction*> CreateGatherLoopAccumulatorInitValue(
accumulator_state_shape_dims.reserve(1 + slice_sizes.size());
accumulator_state_shape_dims.push_back(gather_loop_trip_count);
for (int64 i = 0; i < slice_sizes.size(); i++) {
- if (!absl::c_binary_search(dim_numbers.collapsed_slice_dims(), i)) {
+ if (!c_binary_search(dim_numbers.collapsed_slice_dims(), i)) {
accumulator_state_shape_dims.push_back(slice_sizes[i]);
}
}
@@ -252,7 +251,7 @@ static StatusOr<HloInstruction*> PermuteBatchAndOffsetDims(
int64 batch_idx_counter = 0;
int64 offset_idx_counter = output_rank - offset_dims.size();
for (int64 i = 0; i < output_rank; i++) {
- bool is_offset_dim = absl::c_binary_search(offset_dims, i);
+ bool is_offset_dim = c_binary_search(offset_dims, i);
if (is_offset_dim) {
permutation.push_back(offset_idx_counter++);
} else {
@@ -374,8 +373,8 @@ StatusOr<bool> GatherExpander::Run(HloModule* module) {
std::vector<HloInstruction*> gather_instrs;
for (HloComputation* computation : module->MakeNonfusionComputations()) {
- absl::c_copy_if(computation->instructions(),
- std::back_inserter(gather_instrs), is_nontrivial_gather);
+ c_copy_if(computation->instructions(), std::back_inserter(gather_instrs),
+ is_nontrivial_gather);
}
for (HloInstruction* inst : gather_instrs) {
diff --git a/tensorflow/compiler/xla/service/gpu/BUILD b/tensorflow/compiler/xla/service/gpu/BUILD
index fd1e34a547..8ef72850dc 100644
--- a/tensorflow/compiler/xla/service/gpu/BUILD
+++ b/tensorflow/compiler/xla/service/gpu/BUILD
@@ -180,7 +180,6 @@ cc_library(
"//tensorflow/compiler/xla/service/llvm_ir:tuple_ops",
"//tensorflow/core:lib",
"//tensorflow/core:stream_executor_no_cuda",
- "@com_google_absl//absl/algorithm:container",
"@llvm//:core",
"@llvm//:support",
],
@@ -467,7 +466,6 @@ cc_library(
"//tensorflow/compiler/xla/service:hlo",
"//tensorflow/compiler/xla/service:multi_output_fusion",
"//tensorflow/core:lib",
- "@com_google_absl//absl/algorithm:container",
],
)
@@ -515,7 +513,6 @@ cc_library(
"//tensorflow/compiler/xla/service:hlo_cost_analysis",
"//tensorflow/compiler/xla/service:hlo_pass",
"//tensorflow/core:lib",
- "@com_google_absl//absl/algorithm:container",
],
)
diff --git a/tensorflow/compiler/xla/service/gpu/fusion_merger.cc b/tensorflow/compiler/xla/service/gpu/fusion_merger.cc
index 9b86e5315b..3cd30b754c 100644
--- a/tensorflow/compiler/xla/service/gpu/fusion_merger.cc
+++ b/tensorflow/compiler/xla/service/gpu/fusion_merger.cc
@@ -18,7 +18,6 @@ limitations under the License.
#include <algorithm>
#include <vector>
-#include "absl/algorithm/container.h"
#include "tensorflow/compiler/xla/service/gpu/instruction_fusion.h"
#include "tensorflow/compiler/xla/service/hlo_cost_analysis.h"
#include "tensorflow/compiler/xla/shape_util.h"
@@ -65,11 +64,10 @@ double CalculateBytesReadByFusionParameter(HloInstruction* param) {
// Slice for a more accurate estimate of bytes read.
double bytes = 0.0;
for (auto& instruction : instructions) {
- if (absl::c_all_of(
- instruction->users(), [](const HloInstruction* instruction) {
- return instruction->opcode() == HloOpcode::kSlice ||
- instruction->opcode() == HloOpcode::kDynamicSlice;
- })) {
+ if (c_all_of(instruction->users(), [](const HloInstruction* instruction) {
+ return instruction->opcode() == HloOpcode::kSlice ||
+ instruction->opcode() == HloOpcode::kDynamicSlice;
+ })) {
// All users are slice: accumulate bytes of all user slice instructions.
for (auto& user : instruction->users()) {
bytes += ShapeUtil::ByteSizeOf(user->shape());
@@ -225,7 +223,7 @@ Status FusionInstructionMerger::HandleFusion(HloInstruction* fusion) {
// Skip 'fusion' instruction if we cannot merge into all of its users.
// Merging into all users enables the removal of 'fusion' from the
// computation.
- if (!absl::c_all_of(fusion->users(), [](const HloInstruction* user) {
+ if (!c_all_of(fusion->users(), [](const HloInstruction* user) {
return user->opcode() == HloOpcode::kFusion &&
(user->fusion_kind() == HloInstruction::FusionKind::kLoop ||
user->fusion_kind() == HloInstruction::FusionKind::kInput);
@@ -243,11 +241,11 @@ Status FusionInstructionMerger::HandleFusion(HloInstruction* fusion) {
// If 'fusion' has just one user, then an earlier fusion pass chose not to
// fuse this producer/consumer pair (likely because of expensive instruction
// re-use by the consumer), and so we honor that choice here as well.
- if (absl::c_any_of(fusion->fused_instructions(),
- [](const HloInstruction* instruction) {
- return instruction->opcode() != HloOpcode::kParameter &&
- GpuInstructionFusion::IsExpensive(*instruction);
- })) {
+ if (c_any_of(fusion->fused_instructions(),
+ [](const HloInstruction* instruction) {
+ return instruction->opcode() != HloOpcode::kParameter &&
+ GpuInstructionFusion::IsExpensive(*instruction);
+ })) {
VLOG(3) << "Not merging " << fusion->name()
<< ": Contains one or more expensive instructions.";
++num_fail_expensive_fused_instruction_;
diff --git a/tensorflow/compiler/xla/service/gpu/ir_emitter.cc b/tensorflow/compiler/xla/service/gpu/ir_emitter.cc
index 7111b53944..6675dbd3f9 100644
--- a/tensorflow/compiler/xla/service/gpu/ir_emitter.cc
+++ b/tensorflow/compiler/xla/service/gpu/ir_emitter.cc
@@ -21,7 +21,6 @@ limitations under the License.
#include "tensorflow/core/platform/logging.h"
// IWYU pragma: no_include "llvm/IR/Intrinsics.gen.inc"
-#include "absl/algorithm/container.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
@@ -519,7 +518,7 @@ Status IrEmitter::HandleDot(HloInstruction* dot) {
// We don't have to iterate over the batch dimensions in both arrays; simplify
// the loop nest of the rhs.
for (int i = 0; i != dnums.lhs_batch_dimensions_size(); ++i) {
- DCHECK(absl::c_linear_search(dnums.lhs_batch_dimensions(), i));
+ DCHECK(c_linear_search(dnums.lhs_batch_dimensions(), i));
rhs_index[i] = lhs_index[i];
}
diff --git a/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc b/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc
index 71c30e19a2..1e81cbde35 100644
--- a/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc
+++ b/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc
@@ -21,7 +21,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.h"
-#include "absl/algorithm/container.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
@@ -315,13 +314,13 @@ llvm::Type* GetIndexTypeForKernel(const HloInstruction* hlo, int64 launch_size,
};
// Check the size of input tensors
- if (!absl::c_all_of(unnested_hlo->operands(), hlo_shape_in_range)) {
+ if (!c_all_of(unnested_hlo->operands(), hlo_shape_in_range)) {
return i64_ty;
}
// Check the size of the internal result tensors
if (unnested_hlo->opcode() == HloOpcode::kFusion) {
- if (!absl::c_all_of(
+ if (!c_all_of(
unnested_hlo->fused_instructions_computation()->instructions(),
hlo_shape_in_range)) {
return i64_ty;
@@ -1739,7 +1738,7 @@ Status IrEmitterUnnested::HandleReduce(HloInstruction* reduce) {
Status IrEmitterUnnested::HandleTuple(HloInstruction* tuple) {
bool all_tuple_elements_have_buffer =
- absl::c_all_of(tuple->operands(), [&](HloInstruction* tuple_element) {
+ c_all_of(tuple->operands(), [&](HloInstruction* tuple_element) {
return ir_emitter_context_->buffer_assignment()
.GetUniqueTopLevelSlice(tuple_element)
.ok();
@@ -2323,10 +2322,10 @@ std::unique_ptr<KernelThunk> IrEmitterUnnested::BuildKernelThunk(
// We'll pass a pointer to each of the elements of `buffers` to our kernel, in
// this order.
std::vector<const BufferAllocation*> non_constant_buffers;
- absl::c_copy_if(buffers_needed, std::back_inserter(non_constant_buffers),
- [](const BufferAllocation* allocation) {
- return !allocation->is_constant();
- });
+ c_copy_if(buffers_needed, std::back_inserter(non_constant_buffers),
+ [](const BufferAllocation* allocation) {
+ return !allocation->is_constant();
+ });
std::sort(non_constant_buffers.begin(), non_constant_buffers.end(),
[](const BufferAllocation* a, const BufferAllocation* b) {
@@ -2583,7 +2582,7 @@ StatusOr<std::unique_ptr<Thunk>> IrEmitterUnnested::BuildInitializerThunk(
// MemzeroThunk.
ArraySlice<uint8> literal_bytes(
reinterpret_cast<const uint8*>(literal.untyped_data()), num_bytes);
- if (absl::c_all_of(literal_bytes, [](uint8 byte) { return byte == 0; })) {
+ if (c_all_of(literal_bytes, [](uint8 byte) { return byte == 0; })) {
return {
MakeUnique<MemzeroThunk>(GetAllocationSlice(*hlo, index), nullptr)};
}
@@ -3106,7 +3105,7 @@ LaunchDimensions IrEmitterUnnested::EmitHlo021Tile(
CeilOfRatio<int64>(output_dims_in_tiles[i], kTileSize);
}
const int64 num_tiles =
- absl::c_accumulate(output_dims_in_tiles, 1, std::multiplies<int64>());
+ c_accumulate(output_dims_in_tiles, 1, std::multiplies<int64>());
LaunchDimensions launch_dimensions(num_tiles, kThreadsPerTile);
llvm::Type* index_ty =
diff --git a/tensorflow/compiler/xla/service/gpu/multi_output_fusion.cc b/tensorflow/compiler/xla/service/gpu/multi_output_fusion.cc
index 34a479b289..c62bae0628 100644
--- a/tensorflow/compiler/xla/service/gpu/multi_output_fusion.cc
+++ b/tensorflow/compiler/xla/service/gpu/multi_output_fusion.cc
@@ -23,7 +23,6 @@ limitations under the License.
#include <string>
#include <utility>
-#include "absl/algorithm/container.h"
#include "tensorflow/compiler/xla/layout_util.h"
#include "tensorflow/compiler/xla/service/gpu/instruction_fusion.h"
#include "tensorflow/compiler/xla/service/gpu/ir_emission_utils.h"
@@ -132,7 +131,7 @@ bool ReduceFriendlyInputLayouts(HloInstruction* instr) {
max_rank_layout = &param->shape().layout();
}
}
- return absl::c_all_of(params, [&](HloInstruction* param) {
+ return c_all_of(params, [&](HloInstruction* param) {
return (ShapeUtil::Rank(param->shape()) < max_rank) ||
(LayoutUtil::Equal(param->shape().layout(), *max_rank_layout));
});
@@ -249,7 +248,7 @@ bool GpuMultiOutputFusion::DoProducerConsumerMultiOutputFusion() {
}
// Do not fuse a producer if the other operands of the fusion are
// reachable from the producer; this would create a cycle.
- if (absl::c_any_of(consumer_operands, [&](HloInstruction* operand) {
+ if (c_any_of(consumer_operands, [&](HloInstruction* operand) {
return producer != operand &&
reachability()->IsReachable(producer, operand);
})) {
@@ -269,7 +268,7 @@ bool GpuMultiOutputFusion::DoProducerConsumerMultiOutputFusion() {
for (auto& fusion_pair : potential_fusion_list) {
HloInstruction* producer = fusion_pair.first;
HloInstruction* consumer = fusion_pair.second;
- if (!absl::c_any_of(consumer->operands(), [&](HloInstruction* operand) {
+ if (!c_any_of(consumer->operands(), [&](HloInstruction* operand) {
return producer != operand &&
reachability()->IsReachable(producer, operand);
})) {
diff --git a/tensorflow/compiler/xla/service/hlo_computation.cc b/tensorflow/compiler/xla/service/hlo_computation.cc
index db853360f1..441288da1a 100644
--- a/tensorflow/compiler/xla/service/hlo_computation.cc
+++ b/tensorflow/compiler/xla/service/hlo_computation.cc
@@ -23,7 +23,6 @@ limitations under the License.
#include <set>
#include <sstream>
-#include "absl/algorithm/container.h"
#include "tensorflow/compiler/xla/layout_util.h"
#include "tensorflow/compiler/xla/map_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
@@ -902,9 +901,9 @@ void HloComputation::UniquifyName(NameUniquer* name_uniquer) {
HloInstruction* HloComputation::GetInstructionWithName(
tensorflow::StringPiece name) {
auto instructions_in_computation = instructions();
- auto it = absl::c_find_if(
- instructions_in_computation,
- [&](HloInstruction* instr) { return instr->name() == name; });
+ auto it = c_find_if(instructions_in_computation, [&](HloInstruction* instr) {
+ return instr->name() == name;
+ });
return it == instructions_in_computation.end() ? nullptr : *it;
}
diff --git a/tensorflow/compiler/xla/service/hlo_creation_utils.cc b/tensorflow/compiler/xla/service/hlo_creation_utils.cc
index 83adaddba4..858992a326 100644
--- a/tensorflow/compiler/xla/service/hlo_creation_utils.cc
+++ b/tensorflow/compiler/xla/service/hlo_creation_utils.cc
@@ -14,7 +14,6 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/xla/service/hlo_creation_utils.h"
-#include "absl/algorithm/container.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
@@ -150,13 +149,13 @@ StatusOr<HloInstruction*> MakeConcatHlo(ArraySlice<HloInstruction*> operands,
CHECK_GT(operands.size(), 0);
HloComputation* computation = operands[0]->parent();
- CHECK(absl::c_all_of(operands, [&](HloInstruction* instr) {
+ CHECK(c_all_of(operands, [&](HloInstruction* instr) {
return instr->parent() == computation;
}));
std::vector<const Shape*> operand_shapes;
- absl::c_transform(operands, std::back_inserter(operand_shapes),
- [](HloInstruction* instr) { return &instr->shape(); });
+ c_transform(operands, std::back_inserter(operand_shapes),
+ [](HloInstruction* instr) { return &instr->shape(); });
TF_ASSIGN_OR_RETURN(Shape concat_shape, ShapeInference::InferConcatOpShape(
operand_shapes, dimension));
@@ -229,7 +228,7 @@ StatusOr<HloInstruction*> PrependDegenerateDims(HloInstruction* operand,
const Shape& operand_shape = operand->shape();
new_shape_dims.reserve(n + operand_shape.dimensions_size());
new_shape_dims.insert(new_shape_dims.begin(), n, 1);
- absl::c_copy(operand_shape.dimensions(), std::back_inserter(new_shape_dims));
+ c_copy(operand_shape.dimensions(), std::back_inserter(new_shape_dims));
return MakeReshapeHlo(new_shape_dims, operand);
}
@@ -241,7 +240,7 @@ StatusOr<HloInstruction*> ExpandFirstDimIntoNDims(
std::vector<int64> expanded_shape_dim_bounds;
expanded_shape_dim_bounds.reserve(expanded_dims.size() +
operand->shape().dimensions_size() - 1);
- absl::c_copy(expanded_dims, std::back_inserter(expanded_shape_dim_bounds));
+ c_copy(expanded_dims, std::back_inserter(expanded_shape_dim_bounds));
std::copy(operand->shape().dimensions().begin() + 1,
operand->shape().dimensions().end(),
std::back_inserter(expanded_shape_dim_bounds));
@@ -252,7 +251,7 @@ StatusOr<HloInstruction*> ExpandFirstDimIntoNDims(
StatusOr<HloInstruction*> ElideDegenerateDims(HloInstruction* operand,
ArraySlice<int64> dims_to_elide) {
- CHECK(absl::c_is_sorted(dims_to_elide));
+ CHECK(c_is_sorted(dims_to_elide));
const Shape& input_shape = operand->shape();
// First accumulate in reverse
@@ -269,7 +268,7 @@ StatusOr<HloInstruction*> ElideDegenerateDims(HloInstruction* operand,
}
}
- absl::c_reverse(new_shape_dim_bounds);
+ c_reverse(new_shape_dim_bounds);
Shape output_shape =
ShapeUtil::MakeShape(input_shape.element_type(), new_shape_dim_bounds);
return MakeReshapeHlo(output_shape, operand);
@@ -277,7 +276,7 @@ StatusOr<HloInstruction*> ElideDegenerateDims(HloInstruction* operand,
StatusOr<HloInstruction*> InsertDegenerateDims(
HloInstruction* operand, ArraySlice<int64> dims_to_insert) {
- CHECK(absl::c_is_sorted(dims_to_insert));
+ CHECK(c_is_sorted(dims_to_insert));
const Shape& operand_shape = operand->shape();
int64 output_shape_rank =
diff --git a/tensorflow/compiler/xla/service/hlo_evaluator.cc b/tensorflow/compiler/xla/service/hlo_evaluator.cc
index 0455c7f41a..36d6a2eed6 100644
--- a/tensorflow/compiler/xla/service/hlo_evaluator.cc
+++ b/tensorflow/compiler/xla/service/hlo_evaluator.cc
@@ -23,7 +23,6 @@ limitations under the License.
#include <utility>
#include <vector>
-#include "absl/algorithm/container.h"
#include "tensorflow/compiler/xla/index_util.h"
#include "tensorflow/compiler/xla/layout_util.h"
#include "tensorflow/compiler/xla/literal.h"
@@ -565,8 +564,7 @@ ShapeUtil::IndexIterationSpace IterationSpaceForOutputBatchIndices(
std::vector<int64> index_count;
index_count.reserve(output_rank);
for (int64 i = 0; i < output_rank; i++) {
- bool is_output_batch_dim =
- !absl::c_binary_search(dim_numbers.offset_dims(), i);
+ bool is_output_batch_dim = !c_binary_search(dim_numbers.offset_dims(), i);
index_count.push_back(is_output_batch_dim ? output_shape.dimensions(i) : 1);
}
@@ -583,11 +581,10 @@ ShapeUtil::IndexIterationSpace IterationSpaceForOutputOffsetIndices(
std::vector<int64> index_count(output_rank, 1);
int64 slice_sizes_idx = 0;
for (int64 i = 0; i < output_rank; i++) {
- bool is_output_window_dim =
- absl::c_binary_search(dim_numbers.offset_dims(), i);
+ bool is_output_window_dim = c_binary_search(dim_numbers.offset_dims(), i);
if (is_output_window_dim) {
- while (absl::c_binary_search(dim_numbers.collapsed_slice_dims(),
- slice_sizes_idx)) {
+ while (c_binary_search(dim_numbers.collapsed_slice_dims(),
+ slice_sizes_idx)) {
slice_sizes_idx++;
}
index_count[i] = slice_sizes[slice_sizes_idx++];
@@ -613,13 +610,13 @@ class OutputBatchIndexToInputIndex {
: dim_numbers_(*dim_numbers), start_indices_(*start_indices) {
for (int64 i = 0; i < output_shape.dimensions_size(); i++) {
output_dim_is_batch_dims_.push_back(
- !absl::c_binary_search(dim_numbers_.offset_dims(), i));
+ !c_binary_search(dim_numbers_.offset_dims(), i));
}
for (int64 i = 0; i < input_shape.dimensions_size(); i++) {
int64 index_of_input_dim_in_index_vector =
std::distance(dim_numbers_.start_index_map().begin(),
- absl::c_find(dim_numbers_.start_index_map(), i));
+ c_find(dim_numbers_.start_index_map(), i));
if (index_of_input_dim_in_index_vector ==
dim_numbers_.start_index_map_size()) {
input_dim_value_to_index_vector_.push_back(-1);
@@ -739,7 +736,7 @@ class OutputOffsetIndexToInputIndex {
std::vector<int64> window_index_to_output_index;
int64 output_index_count = 0;
for (int64 i = 0; i < output_shape.dimensions_size(); i++) {
- if (absl::c_binary_search(dim_numbers.offset_dims(), i)) {
+ if (c_binary_search(dim_numbers.offset_dims(), i)) {
window_index_to_output_index.push_back(output_index_count++);
} else {
output_index_count++;
@@ -748,7 +745,7 @@ class OutputOffsetIndexToInputIndex {
int64 window_dim_count = 0;
for (int64 i = 0; i < input_shape.dimensions_size(); i++) {
- if (absl::c_binary_search(dim_numbers.collapsed_slice_dims(), i)) {
+ if (c_binary_search(dim_numbers.collapsed_slice_dims(), i)) {
input_dim_value_to_output_index_.push_back(-1);
} else {
input_dim_value_to_output_index_.push_back(
diff --git a/tensorflow/compiler/xla/service/hlo_evaluator_typed_visitor.h b/tensorflow/compiler/xla/service/hlo_evaluator_typed_visitor.h
index a7c5d71da0..f62e6b74b1 100644
--- a/tensorflow/compiler/xla/service/hlo_evaluator_typed_visitor.h
+++ b/tensorflow/compiler/xla/service/hlo_evaluator_typed_visitor.h
@@ -16,7 +16,6 @@ limitations under the License.
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_HLO_EVALUATOR_TYPED_VISITOR_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_HLO_EVALUATOR_TYPED_VISITOR_H_
-#include "absl/algorithm/container.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/hlo_evaluator.h"
#include "tensorflow/compiler/xla/service/shape_inference.h"
@@ -1826,7 +1825,7 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
std::vector<int64> index_count(updates_rank, 1);
for (int64 i = 0; i < updates_rank; i++) {
bool is_update_scatter_dim =
- !absl::c_binary_search(dim_numbers.update_window_dims(), i);
+ !c_binary_search(dim_numbers.update_window_dims(), i);
if (is_update_scatter_dim) {
index_count[i] = updates_shape.dimensions(i);
}
@@ -1845,7 +1844,7 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
std::vector<int64> index_count(updates_rank, 1);
for (int64 i = 0; i < updates_rank; i++) {
bool is_update_window_dim =
- absl::c_binary_search(dim_numbers.update_window_dims(), i);
+ c_binary_search(dim_numbers.update_window_dims(), i);
if (is_update_window_dim) {
index_count[i] = updates_shape.dimensions(i);
}
@@ -1872,7 +1871,7 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
: dim_numbers_(*dim_numbers), scatter_indices_(*scatter_indices) {
for (int64 i = 0; i < updates_shape.dimensions_size(); i++) {
update_dim_is_scatter_dims_.push_back(
- !absl::c_binary_search(dim_numbers_.update_window_dims(), i));
+ !c_binary_search(dim_numbers_.update_window_dims(), i));
}
for (int64 i = 0; i < input_shape.dimensions_size(); i++) {
@@ -2002,7 +2001,7 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
std::vector<int64> window_index_to_update_index;
int64 update_index_count = 0;
for (int64 i = 0; i < updates_shape.dimensions_size(); i++) {
- if (absl::c_binary_search(dim_numbers.update_window_dims(), i)) {
+ if (c_binary_search(dim_numbers.update_window_dims(), i)) {
window_index_to_update_index.push_back(update_index_count++);
} else {
update_index_count++;
@@ -2011,7 +2010,7 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
int64 window_dim_count = 0;
for (int64 i = 0; i < input_shape.dimensions_size(); i++) {
- if (absl::c_binary_search(dim_numbers.inserted_window_dims(), i)) {
+ if (c_binary_search(dim_numbers.inserted_window_dims(), i)) {
input_dim_value_to_update_index_.push_back(-1);
} else {
input_dim_value_to_update_index_.push_back(
diff --git a/tensorflow/compiler/xla/service/hlo_execution_profile.cc b/tensorflow/compiler/xla/service/hlo_execution_profile.cc
index f554401787..c3ccbf0f0c 100644
--- a/tensorflow/compiler/xla/service/hlo_execution_profile.cc
+++ b/tensorflow/compiler/xla/service/hlo_execution_profile.cc
@@ -19,7 +19,6 @@ limitations under the License.
#include <utility>
#include <vector>
-#include "absl/algorithm/container.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_module.h"
#include "tensorflow/compiler/xla/service/human_readable_profile_builder.h"
@@ -68,11 +67,11 @@ std::unique_ptr<HloProfilePrinterData> CreateHloProfilePrinterData(
// The profile indices were computed deterministically in
// HloProfileIndexMap::HloProfileIndexMap.
- absl::c_sort(computation_and_profile_idx_list,
- [](const std::pair<const HloComputation*, int64>& left,
- const std::pair<const HloComputation*, int64>& right) {
- return left.second < right.second;
- });
+ c_sort(computation_and_profile_idx_list,
+ [](const std::pair<const HloComputation*, int64>& left,
+ const std::pair<const HloComputation*, int64>& right) {
+ return left.second < right.second;
+ });
for (const auto& pair : computation_and_profile_idx_list) {
CHECK_LT(pair.second, profile_counters_size);
diff --git a/tensorflow/compiler/xla/service/hlo_instruction.cc b/tensorflow/compiler/xla/service/hlo_instruction.cc
index 2b81213509..57e75cf931 100644
--- a/tensorflow/compiler/xla/service/hlo_instruction.cc
+++ b/tensorflow/compiler/xla/service/hlo_instruction.cc
@@ -21,7 +21,6 @@ limitations under the License.
#include <unordered_set>
#include <utility>
-#include "absl/algorithm/container.h"
#include "tensorflow/compiler/xla/layout_util.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/protobuf_util.h"
@@ -380,7 +379,7 @@ StatusOr<std::unique_ptr<HloInstruction>> HloInstruction::CreateFromProto(
<< "DynamicSlice instruction should have 2 operands but sees "
<< proto.operand_ids_size();
std::vector<int64> slice_sizes(proto.dynamic_slice_sizes_size());
- absl::c_copy(proto.dynamic_slice_sizes(), slice_sizes.begin());
+ c_copy(proto.dynamic_slice_sizes(), slice_sizes.begin());
instruction = CreateDynamicSlice(proto.shape(), operands(0), operands(1),
slice_sizes);
break;
diff --git a/tensorflow/compiler/xla/service/hlo_instructions.cc b/tensorflow/compiler/xla/service/hlo_instructions.cc
index 0751aacdd6..4fdf4360e6 100644
--- a/tensorflow/compiler/xla/service/hlo_instructions.cc
+++ b/tensorflow/compiler/xla/service/hlo_instructions.cc
@@ -17,7 +17,6 @@ limitations under the License.
#include <deque>
-#include "absl/algorithm/container.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/hlo_casting_utils.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
@@ -1974,7 +1973,7 @@ HloGatherInstruction::HloGatherInstruction(
AppendOperand(start_indices);
gather_dimension_numbers_ =
MakeUnique<GatherDimensionNumbers>(gather_dim_numbers);
- absl::c_copy(slice_sizes, std::back_inserter(gather_slice_sizes_));
+ c_copy(slice_sizes, std::back_inserter(gather_slice_sizes_));
}
string HloGatherInstruction::GatherDimensionNumbersToString() const {
diff --git a/tensorflow/compiler/xla/service/hlo_module.cc b/tensorflow/compiler/xla/service/hlo_module.cc
index 76f8236048..55ff073d3f 100644
--- a/tensorflow/compiler/xla/service/hlo_module.cc
+++ b/tensorflow/compiler/xla/service/hlo_module.cc
@@ -22,7 +22,6 @@ limitations under the License.
#include <unordered_set>
#include <utility>
-#include "absl/algorithm/container.h"
#include "tensorflow/compiler/xla/map_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/shape_util.h"
@@ -539,9 +538,9 @@ uint64 HloModule::RandomNew64() const {
HloComputation* HloModule::GetComputationWithName(
tensorflow::StringPiece name) {
auto computations_in_module = computations();
- auto it = absl::c_find_if(
- computations_in_module,
- [&](HloComputation* computation) { return computation->name() == name; });
+ auto it = c_find_if(computations_in_module, [&](HloComputation* computation) {
+ return computation->name() == name;
+ });
return it == computations_in_module.end() ? nullptr : *it;
}
diff --git a/tensorflow/compiler/xla/service/hlo_parser.cc b/tensorflow/compiler/xla/service/hlo_parser.cc
index e48c9d2c41..ab57a8b07f 100644
--- a/tensorflow/compiler/xla/service/hlo_parser.cc
+++ b/tensorflow/compiler/xla/service/hlo_parser.cc
@@ -15,7 +15,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/hlo_parser.h"
-#include "absl/algorithm/container.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/hlo_domain_metadata.h"
@@ -636,13 +635,12 @@ bool HloParser::ParseInstruction(HloComputation::Builder* builder,
}
std::vector<ReplicaGroup> replica_groups;
if (tmp_groups) {
- absl::c_transform(
- *tmp_groups, std::back_inserter(replica_groups),
- [](const std::vector<int64>& ids) {
- ReplicaGroup group;
- *group.mutable_replica_ids() = {ids.begin(), ids.end()};
- return group;
- });
+ c_transform(*tmp_groups, std::back_inserter(replica_groups),
+ [](const std::vector<int64>& ids) {
+ ReplicaGroup group;
+ *group.mutable_replica_ids() = {ids.begin(), ids.end()};
+ return group;
+ });
}
instruction = builder->AddInstruction(HloInstruction::CreateAllToAll(
shape, operands, replica_groups, barrier ? *barrier : ""));
diff --git a/tensorflow/compiler/xla/service/indexed_array_analysis.cc b/tensorflow/compiler/xla/service/indexed_array_analysis.cc
index 39dff567d4..8d17c03afc 100644
--- a/tensorflow/compiler/xla/service/indexed_array_analysis.cc
+++ b/tensorflow/compiler/xla/service/indexed_array_analysis.cc
@@ -14,7 +14,6 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/xla/service/indexed_array_analysis.h"
-#include "absl/algorithm/container.h"
#include "tensorflow/compiler/xla/map_util.h"
#include "tensorflow/compiler/xla/service/hlo_evaluator.h"
#include "tensorflow/compiler/xla/util.h"
@@ -291,13 +290,13 @@ StatusOr<Analysis::Array*> IndexedArrayAnalysis::ComputeArrayForGather(
int64 source_dim = dim_numbers.start_index_map(0);
std::vector<int64> output_dims;
for (int64 i = 0, e = shape.dimensions_size(); i < e; i++) {
- if (!absl::c_binary_search(dim_numbers.offset_dims(), i)) {
+ if (!c_binary_search(dim_numbers.offset_dims(), i)) {
output_dims.push_back(i);
}
}
if (auto* indexed = dynamic_cast<ScalarIndexedArray*>(source)) {
- if (absl::c_linear_search(indexed->output_dims(), source_dim)) {
+ if (c_linear_search(indexed->output_dims(), source_dim)) {
return FoldGatherOfGather(indexed, indices, source_dim, output_dims,
shape);
}
@@ -315,7 +314,7 @@ namespace {
// [values.begin()+index, values.end()) is equal to `product`. If there is no
// such index, return -1. All integers in `values` must be positive.
int64 FindSuffixWithProduct(ArraySlice<int64> values, int64 product) {
- DCHECK(absl::c_all_of(values, [](int64 value) { return value > 0; }));
+ DCHECK(c_all_of(values, [](int64 value) { return value > 0; }));
int64 current_product = 1;
int64 i;
@@ -389,26 +388,26 @@ std::vector<ReshapePassthroughDimPair> ComputeReshapePassthroughDimPairs(
result_subarray_size *= result_shape[result_dim];
}
- absl::c_reverse(result);
+ c_reverse(result);
if (VLOG_IS_ON(3)) {
std::vector<string> result_strings;
- absl::c_transform(result, std::back_inserter(result_strings),
- [](ReshapePassthroughDimPair value) {
- return tensorflow::strings::StrCat(
- value.result_dim, "->", value.operand_dim);
- });
+ c_transform(result, std::back_inserter(result_strings),
+ [](ReshapePassthroughDimPair value) {
+ return tensorflow::strings::StrCat(value.result_dim, "->",
+ value.operand_dim);
+ });
VLOG(3) << "For a reshape from [" << Join(operand_shape, ",") << "] to ["
<< Join(result_shape, ",") << "] passthrough indices are ["
<< Join(result_strings, ",") << "] (legend: `result`->`operand`)";
}
- DCHECK(absl::c_is_sorted(
+ DCHECK(c_is_sorted(
result, [](ReshapePassthroughDimPair lhs, ReshapePassthroughDimPair rhs) {
return lhs.result_dim < rhs.result_dim;
}));
- DCHECK(absl::c_is_sorted(
+ DCHECK(c_is_sorted(
result, [](ReshapePassthroughDimPair lhs, ReshapePassthroughDimPair rhs) {
return lhs.operand_dim < rhs.operand_dim;
}));
@@ -420,20 +419,20 @@ std::vector<ReshapePassthroughDimPair> ComputeReshapePassthroughDimPairs(
// `passthrough_dims`.
bool IsReshapePassthroughOperandDim(
ArraySlice<ReshapePassthroughDimPair> passthrough_dims, int64 dim) {
- return absl::c_any_of(passthrough_dims,
- [&](ReshapePassthroughDimPair passthrough_dim_pair) {
- return passthrough_dim_pair.operand_dim == dim;
- });
+ return c_any_of(passthrough_dims,
+ [&](ReshapePassthroughDimPair passthrough_dim_pair) {
+ return passthrough_dim_pair.operand_dim == dim;
+ });
}
// Maps `operand_dim`, which must be a passthrough operand dimension, to its
// corresponding passthrough result dimension based on `passthrough_dims`.
int64 MapPassthroughOperandDimToResultDim(
ArraySlice<ReshapePassthroughDimPair> passthrough_dims, int64 operand_dim) {
- auto it = absl::c_find_if(
- passthrough_dims, [&](ReshapePassthroughDimPair passthrough_dim_pair) {
- return passthrough_dim_pair.operand_dim == operand_dim;
- });
+ auto it = c_find_if(passthrough_dims,
+ [&](ReshapePassthroughDimPair passthrough_dim_pair) {
+ return passthrough_dim_pair.operand_dim == operand_dim;
+ });
CHECK(it != passthrough_dims.end());
return it->result_dim;
}
@@ -454,8 +453,8 @@ int64 FindSourcePositionForPassthroughResultDim(ArraySlice<int64> operand_shape,
Shape StripDegenerateDimensions(const Shape& shape) {
DimensionVector new_dims;
- absl::c_copy_if(shape.dimensions(), std::back_inserter(new_dims),
- [](int64 dim) { return dim != 1; });
+ c_copy_if(shape.dimensions(), std::back_inserter(new_dims),
+ [](int64 dim) { return dim != 1; });
return ShapeUtil::MakeShape(shape.element_type(), new_dims);
}
}; // namespace
@@ -553,8 +552,8 @@ StatusOr<ScalarIndexedArray*> IndexedArrayAnalysis::ReshapeToAddDegenerateDims(
}();
DimensionVector new_result_shape_dims;
- absl::c_copy(operand->shape().dimensions(),
- std::back_inserter(new_result_shape_dims));
+ c_copy(operand->shape().dimensions(),
+ std::back_inserter(new_result_shape_dims));
for (int64 degenerate_dim : degenerate_dims) {
InsertAt(&new_result_shape_dims, degenerate_dim, 1);
}
@@ -695,8 +694,8 @@ IndexedArrayAnalysis::FoldReshapeOfGatherNoDegenerateDims(
operand_dim);
};
- if (!absl::c_all_of(scalar_indexed->output_dims(),
- is_reshape_passthrough_operand_dim)) {
+ if (!c_all_of(scalar_indexed->output_dims(),
+ is_reshape_passthrough_operand_dim)) {
VLOG(3) << "Not all output dims are passthrough dims "
<< ToString(scalar_indexed);
return nullptr;
@@ -764,8 +763,8 @@ IndexedArrayAnalysis::FoldReshapeOfGatherNoDegenerateDims(
&new_scalar_indexed_source_shape, source_dim_for_new_scalar_indexed_node,
scalar_indexed_source_shape.dimensions(scalar_indexed->source_dim()));
- CHECK_EQ(absl::c_accumulate(new_scalar_indexed_source_shape, 1LL,
- std::multiplies<int64>()),
+ CHECK_EQ(c_accumulate(new_scalar_indexed_source_shape, 1LL,
+ std::multiplies<int64>()),
ShapeUtil::ElementsIn(scalar_indexed_source_shape));
CHECK(IsReshapePassthroughOperandDim(
@@ -781,9 +780,9 @@ IndexedArrayAnalysis::FoldReshapeOfGatherNoDegenerateDims(
};
std::vector<int64> output_dims_for_new_scalar_indexed_node;
- absl::c_transform(scalar_indexed->output_dims(),
- std::back_inserter(output_dims_for_new_scalar_indexed_node),
- map_passthrough_operand_dim_to_result_dim);
+ c_transform(scalar_indexed->output_dims(),
+ std::back_inserter(output_dims_for_new_scalar_indexed_node),
+ map_passthrough_operand_dim_to_result_dim);
TF_ASSIGN_OR_RETURN(const Literal* new_scalar_indexed_source_literal,
TakeOwnership(scalar_indexed->literal().Reshape(
@@ -874,12 +873,11 @@ IndexedArrayAnalysis::ComputeArrayForElementwiseBinaryOp(HloOpcode opcode,
ArraySlice<int64> broadcast_dims = broadcast_instr->dimensions();
auto is_broadcasted_dim = [&](int64 output_dim) {
- return absl::c_find(broadcast_dims, output_dim) == broadcast_dims.end();
+ return c_find(broadcast_dims, output_dim) == broadcast_dims.end();
};
// All of the output dims must be "broadcasted" dims for the other operand.
- if (!absl::c_all_of(scalar_indexed_const->output_dims(),
- is_broadcasted_dim)) {
+ if (!c_all_of(scalar_indexed_const->output_dims(), is_broadcasted_dim)) {
return nullptr;
}
diff --git a/tensorflow/compiler/xla/service/instruction_fusion.cc b/tensorflow/compiler/xla/service/instruction_fusion.cc
index 2fd2214806..f33942d679 100644
--- a/tensorflow/compiler/xla/service/instruction_fusion.cc
+++ b/tensorflow/compiler/xla/service/instruction_fusion.cc
@@ -21,7 +21,6 @@ limitations under the License.
#include <numeric>
#include <vector>
-#include "absl/algorithm/container.h"
#include "tensorflow/compiler/xla/map_util.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
#include "tensorflow/core/lib/core/errors.h"
@@ -498,7 +497,7 @@ HloInstruction* InstructionFusion::FuseIntoMultiOutput(
bool InstructionFusion::MultiOutputFusionCreatesCycle(
HloInstruction* producer, HloInstruction* consumer) {
- return absl::c_any_of(
+ return c_any_of(
consumer->operands(), [&](const HloInstruction* consumer_operand) {
// The fusion algorithm traverses the HLO graph in reverse post order.
// Thus `consumer` is visited before its operands (including
diff --git a/tensorflow/compiler/xla/service/llvm_ir/BUILD b/tensorflow/compiler/xla/service/llvm_ir/BUILD
index ce2d6678a5..cdd3daf73b 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/BUILD
+++ b/tensorflow/compiler/xla/service/llvm_ir/BUILD
@@ -88,7 +88,6 @@ cc_library(
"//tensorflow/compiler/xla:util",
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/core:lib",
- "@com_google_absl//absl/algorithm:container",
"@llvm//:core",
],
)
diff --git a/tensorflow/compiler/xla/service/llvm_ir/ir_array.h b/tensorflow/compiler/xla/service/llvm_ir/ir_array.h
index cbfd2e7012..28ca793e3e 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/ir_array.h
+++ b/tensorflow/compiler/xla/service/llvm_ir/ir_array.h
@@ -19,7 +19,6 @@ limitations under the License.
#include <map>
#include <vector>
-#include "absl/algorithm/container.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Value.h"
#include "tensorflow/compiler/xla/map_util.h"
@@ -82,7 +81,7 @@ class IrArray {
}
}
CHECK_NE(index_type_, nullptr);
- CHECK(absl::c_all_of(multidim, [&](llvm::Value* v) {
+ CHECK(c_all_of(multidim, [&](llvm::Value* v) {
return index_type_ == v->getType();
}));
}
diff --git a/tensorflow/compiler/xla/service/reshape_mover.cc b/tensorflow/compiler/xla/service/reshape_mover.cc
index 4df746fca9..ca86c5d13e 100644
--- a/tensorflow/compiler/xla/service/reshape_mover.cc
+++ b/tensorflow/compiler/xla/service/reshape_mover.cc
@@ -38,8 +38,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/reshape_mover.h"
#include <algorithm>
-
-#include "absl/algorithm/container.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
@@ -376,7 +374,7 @@ StatusOr<bool> TryReshapeMoveOnCandidates(
removed = false;
for (auto operand : nontrivial_operands) {
- if (absl::c_any_of(operand->users(), [&](HloInstruction* user) {
+ if (c_any_of(operand->users(), [&](HloInstruction* user) {
return !reshape_candidates->count(user);
})) {
for (auto* user : operand->users()) {
diff --git a/tensorflow/compiler/xla/service/scatter_expander.cc b/tensorflow/compiler/xla/service/scatter_expander.cc
index 338f0c09e9..45ca731153 100644
--- a/tensorflow/compiler/xla/service/scatter_expander.cc
+++ b/tensorflow/compiler/xla/service/scatter_expander.cc
@@ -15,7 +15,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/scatter_expander.h"
-#include "absl/algorithm/container.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_creation_utils.h"
@@ -93,7 +92,7 @@ static StatusOr<HloInstruction*> PermuteScatterAndWindowDims(
permutation.reserve(updates_rank);
for (int64 i = 0; i < updates_rank; ++i) {
- bool is_scatter_dim = !absl::c_binary_search(update_window_dims, i);
+ bool is_scatter_dim = !c_binary_search(update_window_dims, i);
if (is_scatter_dim) {
permutation.push_back(i);
}
diff --git a/tensorflow/compiler/xla/service/shape_inference.cc b/tensorflow/compiler/xla/service/shape_inference.cc
index ec6aa6df55..cc1ec1704e 100644
--- a/tensorflow/compiler/xla/service/shape_inference.cc
+++ b/tensorflow/compiler/xla/service/shape_inference.cc
@@ -21,7 +21,6 @@ limitations under the License.
#include <set>
#include <string>
-#include "absl/algorithm/container.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/types.h"
@@ -2495,13 +2494,13 @@ static Status ValidateGatherDimensionNumbers(
const Shape& input_shape,
tensorflow::gtl::ArraySlice<int64> start_indices_shape,
const GatherDimensionNumbers& dim_numbers) {
- if (!absl::c_is_sorted(dim_numbers.offset_dims())) {
+ if (!c_is_sorted(dim_numbers.offset_dims())) {
return InvalidArgument(
"Output window dimensions in gather op must be ascending; got: %s.",
Join(dim_numbers.offset_dims(), ", ").c_str());
}
- if (absl::c_adjacent_find(dim_numbers.offset_dims()) !=
+ if (c_adjacent_find(dim_numbers.offset_dims()) !=
dim_numbers.offset_dims().end()) {
return InvalidArgument(
"Output window dimensions in gather op must not repeat; got: %s.",
@@ -2547,10 +2546,9 @@ static Status ValidateGatherDimensionNumbers(
dim_numbers.start_index_map().begin(),
dim_numbers.start_index_map().end());
- absl::c_sort(sorted_start_index_map);
+ c_sort(sorted_start_index_map);
- if (absl::c_adjacent_find(sorted_start_index_map) !=
- sorted_start_index_map.end()) {
+ if (c_adjacent_find(sorted_start_index_map) != sorted_start_index_map.end()) {
return InvalidArgument(
"Repeated dimensions are not allowed in start_index_map; "
"got: %s.",
@@ -2566,13 +2564,13 @@ static Status ValidateGatherDimensionNumbers(
}
}
- if (!absl::c_is_sorted(dim_numbers.collapsed_slice_dims())) {
+ if (!c_is_sorted(dim_numbers.collapsed_slice_dims())) {
return InvalidArgument(
"collapsed_slice_dims in gather op must be sorted; got: %s",
Join(dim_numbers.collapsed_slice_dims(), ", ").c_str());
}
- if (absl::c_adjacent_find(dim_numbers.collapsed_slice_dims()) !=
+ if (c_adjacent_find(dim_numbers.collapsed_slice_dims()) !=
dim_numbers.collapsed_slice_dims().end()) {
return InvalidArgument(
"Repeated dimensions not allowed in collapsed_slice_dims in gather op; "
@@ -2615,8 +2613,8 @@ static Status ValidateGatherDimensionNumbers(
std::vector<int64> expanded_start_indices_shape;
expanded_start_indices_shape.reserve(start_indices_shape.dimensions_size());
- absl::c_copy(start_indices_shape.dimensions(),
- std::back_inserter(expanded_start_indices_shape));
+ c_copy(start_indices_shape.dimensions(),
+ std::back_inserter(expanded_start_indices_shape));
if (expanded_start_indices_shape.size() ==
gather_dim_numbers.index_vector_dim()) {
expanded_start_indices_shape.push_back(1);
@@ -2672,11 +2670,10 @@ static Status ValidateGatherDimensionNumbers(
output_dim_bounds.reserve(result_rank);
for (int64 i = 0; i < result_rank; i++) {
int64 current_bound;
- bool is_window_index =
- absl::c_binary_search(gather_dim_numbers.offset_dims(), i);
+ bool is_window_index = c_binary_search(gather_dim_numbers.offset_dims(), i);
if (is_window_index) {
- while (absl::c_binary_search(gather_dim_numbers.collapsed_slice_dims(),
- offset_dims_seen)) {
+ while (c_binary_search(gather_dim_numbers.collapsed_slice_dims(),
+ offset_dims_seen)) {
offset_dims_seen++;
}
current_bound = slice_sizes[offset_dims_seen++];
@@ -2700,12 +2697,12 @@ Status ValidateScatterDimensionNumbers(
tensorflow::gtl::ArraySlice<int64> scatter_indices_shape,
const Shape& updates_shape, const ScatterDimensionNumbers& dim_numbers) {
// Validate update_window_dims in ScatterDimensionNumbers.
- if (!absl::c_is_sorted(dim_numbers.update_window_dims())) {
+ if (!c_is_sorted(dim_numbers.update_window_dims())) {
return InvalidArgument(
"update_window_dims in scatter op must be sorted; got: %s.",
Join(dim_numbers.update_window_dims(), ", ").c_str());
}
- if (absl::c_adjacent_find(dim_numbers.update_window_dims()) !=
+ if (c_adjacent_find(dim_numbers.update_window_dims()) !=
dim_numbers.update_window_dims().end()) {
return InvalidArgument(
"update_window_dims in scatter op must not repeat; got: %s.",
@@ -2722,12 +2719,12 @@ Status ValidateScatterDimensionNumbers(
}
// Validate inserted_window_dims in ScatterDimensionNumbers.
- if (!absl::c_is_sorted(dim_numbers.inserted_window_dims())) {
+ if (!c_is_sorted(dim_numbers.inserted_window_dims())) {
return InvalidArgument(
"inserted_window_dims in scatter op must be sorted; got: %s.",
Join(dim_numbers.inserted_window_dims(), ", ").c_str());
}
- if (absl::c_adjacent_find(dim_numbers.inserted_window_dims()) !=
+ if (c_adjacent_find(dim_numbers.inserted_window_dims()) !=
dim_numbers.inserted_window_dims().end()) {
return InvalidArgument(
"inserted_window_dims in scatter op must not repeat; got: %s.",
@@ -2767,8 +2764,8 @@ Status ValidateScatterDimensionNumbers(
std::vector<int64> sorted_scatter_dims_to_operand_dims(
dim_numbers.scatter_dims_to_operand_dims().begin(),
dim_numbers.scatter_dims_to_operand_dims().end());
- absl::c_sort(sorted_scatter_dims_to_operand_dims);
- if (absl::c_adjacent_find(sorted_scatter_dims_to_operand_dims) !=
+ c_sort(sorted_scatter_dims_to_operand_dims);
+ if (c_adjacent_find(sorted_scatter_dims_to_operand_dims) !=
sorted_scatter_dims_to_operand_dims.end()) {
return InvalidArgument(
"Repeated dimensions not allowed in scatter_dims_to_operand_dims; "
@@ -2860,7 +2857,7 @@ Status ValidateScatterDimensionNumbers(
int64 scatter_dims_seen = 0;
for (int64 i = 0; i < ShapeUtil::Rank(updates_shape); ++i) {
bool is_update_window_dim =
- absl::c_binary_search(scatter_dim_numbers.update_window_dims(), i);
+ c_binary_search(scatter_dim_numbers.update_window_dims(), i);
if (is_update_window_dim) {
continue;
}
diff --git a/tensorflow/compiler/xla/service/while_loop_constant_sinking.cc b/tensorflow/compiler/xla/service/while_loop_constant_sinking.cc
index aab1180662..62af45128a 100644
--- a/tensorflow/compiler/xla/service/while_loop_constant_sinking.cc
+++ b/tensorflow/compiler/xla/service/while_loop_constant_sinking.cc
@@ -14,7 +14,6 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/xla/service/while_loop_constant_sinking.h"
-#include "absl/algorithm/container.h"
#include "tensorflow/compiler/xla/service/while_util.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/gtl/flatmap.h"
@@ -33,7 +32,7 @@ static Status ReplaceUsesWhileKeepingLoopInvariance(
std::vector<HloInstruction*> users;
users.reserve(old_instr->user_count());
- absl::c_copy(old_instr->users(), std::back_inserter(users));
+ c_copy(old_instr->users(), std::back_inserter(users));
for (auto* user : users) {
for (int64 i = 0, e = user->operand_count(); i < e; i++) {
@@ -109,10 +108,10 @@ StatusOr<bool> WhileLoopConstantSinking::Run(HloModule* module) {
//
// This will let us sink the constant into the outer while first and then
// into the inner while in a single run of this pass.
- absl::c_copy_if(comp->instructions(), std::back_inserter(while_instrs),
- [](const HloInstruction* instr) {
- return instr->opcode() == HloOpcode::kWhile;
- });
+ c_copy_if(comp->instructions(), std::back_inserter(while_instrs),
+ [](const HloInstruction* instr) {
+ return instr->opcode() == HloOpcode::kWhile;
+ });
}
for (HloInstruction* while_instr : while_instrs) {
diff --git a/tensorflow/compiler/xla/service/while_loop_invariant_code_motion.cc b/tensorflow/compiler/xla/service/while_loop_invariant_code_motion.cc
index cb132d4f16..09ddcffb22 100644
--- a/tensorflow/compiler/xla/service/while_loop_invariant_code_motion.cc
+++ b/tensorflow/compiler/xla/service/while_loop_invariant_code_motion.cc
@@ -14,7 +14,6 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/xla/service/while_loop_invariant_code_motion.h"
-#include "absl/algorithm/container.h"
#include "tensorflow/compiler/xla/service/tuple_util.h"
#include "tensorflow/compiler/xla/service/while_util.h"
#include "tensorflow/compiler/xla/util.h"
@@ -66,8 +65,8 @@ static void CreateLoopInvariantCopy(
};
InlinedVector<HloInstruction*, 4> new_operands;
- absl::c_transform(old_instruction->operands(),
- std::back_inserter(new_operands), get_new_operand);
+ c_transform(old_instruction->operands(), std::back_inserter(new_operands),
+ get_new_operand);
HloInstruction* new_instruction =
parent_of_while->AddInstruction(old_instruction->CloneWithNewOperands(
@@ -198,7 +197,7 @@ WhileLoopInvariantCodeMotion::TryHoistingInvariantInstructionsFromWhileBody(
op->opcode() == HloOpcode::kConstant;
};
- if (!absl::c_all_of(instruction->operands(), is_invariant)) {
+ if (!c_all_of(instruction->operands(), is_invariant)) {
continue;
}
@@ -258,10 +257,10 @@ StatusOr<bool> WhileLoopInvariantCodeMotion::Run(HloModule* module) {
bool changed = false;
std::vector<HloInstruction*> while_instrs;
for (auto* comp : module->computations()) {
- absl::c_copy_if(comp->instructions(), std::back_inserter(while_instrs),
- [](const HloInstruction* instr) {
- return instr->opcode() == HloOpcode::kWhile;
- });
+ c_copy_if(comp->instructions(), std::back_inserter(while_instrs),
+ [](const HloInstruction* instr) {
+ return instr->opcode() == HloOpcode::kWhile;
+ });
}
for (HloInstruction* while_instr : while_instrs) {
diff --git a/tensorflow/compiler/xla/service/while_util.cc b/tensorflow/compiler/xla/service/while_util.cc
index 52d9c3e5ae..1ef17b9d7d 100644
--- a/tensorflow/compiler/xla/service/while_util.cc
+++ b/tensorflow/compiler/xla/service/while_util.cc
@@ -14,7 +14,6 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/xla/service/while_util.h"
-#include "absl/algorithm/container.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_creation_utils.h"
@@ -207,7 +206,7 @@ static StatusOr<HloInstruction*> MakeInitTupleFromInitValues(
HloInstruction* zero = computation->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(0)));
init_values_with_indvar.push_back(zero);
- absl::c_copy(init_values, std::back_inserter(init_values_with_indvar));
+ c_copy(init_values, std::back_inserter(init_values_with_indvar));
return computation->AddInstruction(
HloInstruction::CreateTuple(init_values_with_indvar));
}
@@ -216,9 +215,8 @@ static Shape MakeLoopStateShape(const WhileUtil::LoopStateTy& init_values) {
std::vector<Shape> loop_state_shape_components;
loop_state_shape_components.reserve(init_values.size() + 1);
loop_state_shape_components.push_back(ShapeUtil::MakeShape(S32, {}));
- absl::c_transform(init_values,
- std::back_inserter(loop_state_shape_components),
- [](HloInstruction* instr) { return instr->shape(); });
+ c_transform(init_values, std::back_inserter(loop_state_shape_components),
+ [](HloInstruction* instr) { return instr->shape(); });
return ShapeUtil::MakeTupleShape(loop_state_shape_components);
}
diff --git a/tensorflow/compiler/xla/service/while_util_test.cc b/tensorflow/compiler/xla/service/while_util_test.cc
index 5e69419333..2ccb919acf 100644
--- a/tensorflow/compiler/xla/service/while_util_test.cc
+++ b/tensorflow/compiler/xla/service/while_util_test.cc
@@ -15,7 +15,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/while_util.h"
-#include "absl/algorithm/container.h"
#include "tensorflow/compiler/xla/service/hlo_matchers.h"
#include "tensorflow/compiler/xla/service/hlo_parser.h"
#include "tensorflow/compiler/xla/test.h"
@@ -207,7 +206,7 @@ ENTRY main {
auto is_while = [](const HloInstruction* instr) {
return instr->opcode() == HloOpcode::kWhile;
};
- EXPECT_EQ(absl::c_count_if(main->instructions(), is_while), 1);
+ EXPECT_EQ(c_count_if(main->instructions(), is_while), 1);
}
} // namespace
} // namespace xla