aboutsummaryrefslogtreecommitdiffhomepage
path: root/tensorflow/compiler/xla/service/buffer_assignment_test.cc
diff options
context:
space:
mode:
authorGravatar Mark Heffernan <meheff@google.com>2018-09-05 10:34:12 -0700
committerGravatar TensorFlower Gardener <gardener@tensorflow.org>2018-09-05 10:41:37 -0700
commit7fa693209fe238478739b3982f652a7e35be91f3 (patch)
treeeb31635c366d9eceb144970ddb2b659441204ce1 /tensorflow/compiler/xla/service/buffer_assignment_test.cc
parent08313b87960962efb98bcd684776c8305fa9909a (diff)
Add HloSchedule class representing a sequential order of an HloModule.
Currently we represent a sequential schedule of a module using a SequentialHloOrdering::HloModuleSequence, which is a type alias for a bare map from HloComputation* to std::vector<HloInstruction*>. This CL replaces it with a proper class, which results in better encapsulation of the code that deals with schedules and better enforcement of invariants. This CL also fixes a corner-case bug in dataflow analysis, where values of instructions that are live out of the computation erroneously did not interfere with the values of instructions scheduled after the root instruction. PiperOrigin-RevId: 211656888
Diffstat (limited to 'tensorflow/compiler/xla/service/buffer_assignment_test.cc')
-rw-r--r--tensorflow/compiler/xla/service/buffer_assignment_test.cc98
1 file changed, 40 insertions(+), 58 deletions(-)
diff --git a/tensorflow/compiler/xla/service/buffer_assignment_test.cc b/tensorflow/compiler/xla/service/buffer_assignment_test.cc
index 7398f105a0..03e155fc11 100644
--- a/tensorflow/compiler/xla/service/buffer_assignment_test.cc
+++ b/tensorflow/compiler/xla/service/buffer_assignment_test.cc
@@ -33,6 +33,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
#include "tensorflow/compiler/xla/service/hlo_ordering.h"
#include "tensorflow/compiler/xla/service/hlo_parser.h"
+#include "tensorflow/compiler/xla/service/hlo_schedule.h"
#include "tensorflow/compiler/xla/service/hlo_scheduling.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/test.h"
@@ -40,6 +41,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/tests/hlo_verified_test_base.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/macros.h"
namespace xla {
@@ -120,14 +122,10 @@ class BufferAssignmentTest : public HloVerifiedTestBase {
HloModule* module,
absl::Span<const HloInstruction* const> instruction_sequence,
int64 alignment = 1) {
- SequentialHloOrdering::HloModuleSequence module_sequence;
- module_sequence[module->entry_computation()] =
- std::vector<const HloInstruction*>(instruction_sequence.begin(),
- instruction_sequence.end());
+ HloSchedule schedule(module);
+ schedule.set_sequence(module->entry_computation(), instruction_sequence);
return BufferAssigner::Run(
- module,
- absl::make_unique<SequentialHloOrdering>(module,
- module_sequence),
+ module, absl::make_unique<SequentialHloOrdering>(schedule),
backend().compiler()->BufferSizeBytesFunction(),
[alignment](LogicalBuffer::Color) { return alignment; },
/*allow_input_output_aliasing=*/false,
@@ -1785,11 +1783,10 @@ class WhileBufferAssignmentTest : public HloVerifiedTestBase {
std::unique_ptr<BufferAssignment> RunBufferAssignment(HloModule* module,
int64 alignment = 1) {
- auto sequence =
- ScheduleComputationsInModule(*module, ByteSizeOf).ConsumeValueOrDie();
+ HloSchedule schedule =
+ ScheduleModule(*module, ByteSizeOf).ConsumeValueOrDie();
return BufferAssigner::Run(
- module,
- absl::make_unique<SequentialHloOrdering>(module, sequence),
+ module, absl::make_unique<SequentialHloOrdering>(schedule),
ByteSizeOf,
[alignment](LogicalBuffer::Color) { return alignment; },
/*allow_input_output_aliasing=*/false,
@@ -2096,17 +2093,25 @@ TEST_F(WhileBufferAssignmentTest, ColocatedBuffers) {
// Create a sequential order among all the instructions in the entry
// computation, since the issue this test stresses depends on the order the
// nodes are traversed during BufferAssignment.
- SequentialHloOrdering::HloModuleSequence sequence;
- sequence[module->entry_computation()] = {
- token, infeed, infeed_data, while0, while1, zero, add, while2, tuple};
+ TF_ASSERT_OK_AND_ASSIGN(
+ HloSchedule schedule,
+ ScheduleModule(*module, [](const BufferValue& buffer) {
+ return ShapeUtil::ByteSizeOf(buffer.shape(),
+ /*pointer_size=*/sizeof(void*));
+ }));
+ schedule.set_sequence(
+ module->entry_computation(),
+ {token, infeed, infeed_data, while0, while1, zero, add, while2, tuple});
+ TF_ASSERT_OK(schedule.Verify());
+
TF_ASSERT_OK_AND_ASSIGN(
auto assignment,
- BufferAssigner::Run(
- module, absl::make_unique<SequentialHloOrdering>(module, sequence),
- backend().compiler()->BufferSizeBytesFunction(),
- [](LogicalBuffer::Color) { return 1; },
- /*allow_input_output_aliasing=*/false,
- /*allocate_buffers_for_constants=*/true));
+ BufferAssigner::Run(module,
+ absl::make_unique<SequentialHloOrdering>(schedule),
+ backend().compiler()->BufferSizeBytesFunction(),
+ [](LogicalBuffer::Color) { return 1; },
+ /*allow_input_output_aliasing=*/false,
+ /*allocate_buffers_for_constants=*/true));
// The result tuple elements must be assigned with different buffers.
TF_ASSERT_OK_AND_ASSIGN(auto slice0, assignment->GetUniqueSlice(tuple, {0}));
@@ -2263,29 +2268,6 @@ ENTRY Main {
GetAllocation(*buffers, param0, {1, 1}));
}
-static bool IsPostOrderTraversal(
- const std::vector<const HloInstruction*>& sequence) {
- tensorflow::gtl::FlatSet<const HloInstruction*> seen_so_far;
- auto has_not_been_seen_yet = [&](const HloInstruction* instruction) {
- return seen_so_far.count(instruction) == 0;
- };
-
- for (auto instruction : sequence) {
- if (std::any_of(instruction->operands().begin(),
- instruction->operands().end(), has_not_been_seen_yet) ||
- std::any_of(instruction->control_predecessors().begin(),
- instruction->control_predecessors().end(),
- has_not_been_seen_yet)) {
- return false; // Not a post order.
- }
- if (!seen_so_far.insert(instruction).second) {
- return false; // Not a "traversal".
- }
- }
-
- return true;
-}
-
TEST_F(WhileBufferAssignmentTest, WhileLoopsInterferingResultRange) {
auto module = CreateNewModule();
auto builder = HloComputation::Builder(TestName());
@@ -2340,27 +2322,27 @@ TEST_F(WhileBufferAssignmentTest, WhileLoopsInterferingResultRange) {
RunCopyInsertion(module);
- auto sequence =
- ScheduleComputationsInModule(*module, ByteSizeOf).ConsumeValueOrDie();
+ HloSchedule schedule =
+ ScheduleModule(*module, ByteSizeOf).ConsumeValueOrDie();
- // To trigger b/38494731, we want a specific Hlo sequence for the
+ // To trigger b/38494731, we want a specific Hlo schedule for the
// root computation, so we overwrite that entry with a manually
// crafted sequence.
- sequence[module->entry_computation()] = {
- input1, weights1, one, output1, while1->operand(0), while1,
- input0, weights0, zero, output0, while0->operand(0), while0,
- gte0, gte1, root_add};
+ schedule.set_sequence(module->entry_computation(),
+ {input1, weights1, one, output1, while1->operand(0),
+ while1, input0, weights0, zero, output0,
+ while0->operand(0), while0, gte0, gte1, root_add});
- // If this ASSERT_TRUE fails, we constructed a bogus sequence above
- // and this test itself is buggy.
- ASSERT_TRUE(IsPostOrderTraversal(sequence[module->entry_computation()]));
+ // If this ASSERT fails, we constructed a bogus sequence above and this test
+ // itself is buggy.
+ TF_ASSERT_OK(schedule.Verify());
auto assignment =
- BufferAssigner::Run(
- module, absl::make_unique<SequentialHloOrdering>(module, sequence),
- ByteSizeOf, [](LogicalBuffer::Color) { return 1; },
- /*allow_input_output_aliasing=*/false,
- /*allocate_buffers_for_constants=*/true)
+ BufferAssigner::Run(module,
+ absl::make_unique<SequentialHloOrdering>(schedule),
+ ByteSizeOf, [](LogicalBuffer::Color) { return 1; },
+ /*allow_input_output_aliasing=*/false,
+ /*allocate_buffers_for_constants=*/true)
.ConsumeValueOrDie();
EXPECT_TRUE(BuffersDistinct({while0}, {while1}, *assignment));