diff options
author:    Benjamin Kramer <kramerb@google.com>  (2018-10-01 13:43:49 -0700)
committer: TensorFlower Gardener <gardener@tensorflow.org>  (2018-10-01 13:53:07 -0700)
commit:    3039a4694e22674b502257ae34b0a5b614a631f3 (patch)
tree:      423fdfa7a2e7dd2740af97accfe848bc97b335d0 /tensorflow/compiler/xla/service/hlo_schedule.cc
parent:    44acd839c57494860666c799afd24360f1df3bed (diff)
[XLA] Migrate from gtl::FlatMap to absl::flat_hash_map
PiperOrigin-RevId: 215272497
Diffstat (limited to 'tensorflow/compiler/xla/service/hlo_schedule.cc')
-rw-r--r-- | tensorflow/compiler/xla/service/hlo_schedule.cc | 19 |
1 file changed, 9 insertions(+), 10 deletions(-)
diff --git a/tensorflow/compiler/xla/service/hlo_schedule.cc b/tensorflow/compiler/xla/service/hlo_schedule.cc
index 3fc5dbeb02..7c5c98f04e 100644
--- a/tensorflow/compiler/xla/service/hlo_schedule.cc
+++ b/tensorflow/compiler/xla/service/hlo_schedule.cc
@@ -18,6 +18,7 @@ limitations under the License.
 #include <queue>
 #include <vector>
 
+#include "absl/container/flat_hash_map.h"
 #include "absl/strings/str_format.h"
 #include "absl/strings/str_join.h"
 #include "tensorflow/compiler/xla/map_util.h"
@@ -30,7 +31,7 @@ namespace xla {
 
 /* static */ StatusOr<HloSchedule> HloSchedule::CreateFromProto(
     const HloModule* module, const HloScheduleProto& proto) {
-  tensorflow::gtl::FlatMap<int64, const HloComputation*> id_to_computation;
+  absl::flat_hash_map<int64, const HloComputation*> id_to_computation;
   for (const HloComputation* computation : module->computations()) {
     id_to_computation[computation->unique_id()] = computation;
   }
@@ -44,7 +45,7 @@ namespace xla {
         << "No computation exists in HLO module with id " << computation_id;
     const HloComputation* computation = comp_it->second;
 
-    tensorflow::gtl::FlatMap<int64, const HloInstruction*> id_to_instruction;
+    absl::flat_hash_map<int64, const HloInstruction*> id_to_instruction;
     for (const HloInstruction* instruction : computation->instructions()) {
       id_to_instruction[instruction->unique_id()] = instruction;
     }
@@ -112,7 +113,7 @@ Status HloSchedule::UpdateComputationSchedule(
     const HloComputation* computation) {
   // Map from unique ID to HloInstruction pointer for instructions in the
   // computation.
-  tensorflow::gtl::FlatMap<int, const HloInstruction*> id_to_instruction;
+  absl::flat_hash_map<int, const HloInstruction*> id_to_instruction;
   for (const HloInstruction* instruction : computation->instructions()) {
     InsertOrDie(&id_to_instruction, instruction->unique_id(), instruction);
   }
@@ -126,15 +127,13 @@ Status HloSchedule::UpdateComputationSchedule(
   // Map from HloInstruction X to newly added instructions (instruction is in
   // computation, but not in schedule) which use X. If an instruction is not in
   // the map, then it has no users which are newly added instructions.
-  tensorflow::gtl::FlatMap<const HloInstruction*,
-                           std::vector<const HloInstruction*>>
+  absl::flat_hash_map<const HloInstruction*, std::vector<const HloInstruction*>>
       new_instruction_uses;
 
   // For each newly added instruction, this is the count of the instruction's
   // operands that have not yet been scheduled. When this value reaches zero,
   // then the instruction may be placed in the schedule.
-  tensorflow::gtl::FlatMap<const HloInstruction*, int>
-      unscheduled_operand_count;
+  absl::flat_hash_map<const HloInstruction*, int> unscheduled_operand_count;
 
   // Create a worklist of newly added instructions which are ready to be added
   // to the schedule. Initialize worklist with those that have zero operands.
@@ -217,9 +216,9 @@ Status HloSchedule::Update() {
     }
     for (auto it = sequences_.begin(); it != sequences_.end();) {
       if (nonfusion_computations_ids.count(it->first) == 0) {
-        it = sequences_.erase(it);
+        sequences_.erase(it++);
       } else {
-        it++;
+        ++it;
       }
     }
   }
@@ -254,7 +253,7 @@ Status HloSchedule::Verify() const {
   // For each computation verify the set of instructions is the same and that
   // each dependency and control edge is honored.
   for (const HloComputation* computation : nonfusion_computations) {
-    tensorflow::gtl::FlatMap<const HloInstruction*, int> instruction_position;
+    absl::flat_hash_map<const HloInstruction*, int> instruction_position;
     int pos = 0;
     for (const HloInstruction* instruction :
          sequence(computation).instructions()) {