# tensorflow/contrib/mpi_collectives/mpi_allgather_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
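"""Tests allgather of tf.IndexedSlices via tensorflow.contrib.mpi_collectives.

This test must run under an MPI process manager that sets PMI_RANK and
PMI_SIZE in the environment, e.g. (assuming an MPICH-style launcher):

    mpirun -np 2 python mpi_allgather_test.py
"""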


from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import numpy as np
import tensorflow as tf
import tensorflow.contrib.mpi_collectives as mpi
from tensorflow.python.platform import test


average_allgather = False
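# When True, mpi.allreduce is asked to average, so each gathered value is
# expected to be divided by the number of ranks (see checkAllgather).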


class AllgatherTest(test.TestCase):
  def checkAllgather(self, num_ranks, all_gathered, local_gathered):
    # Ensure that indices match.
    all_gat_ind = np.sort(all_gathered.indices)
    loc_gat_ind = np.sort(local_gathered.indices)
    self.assertEqual(len(loc_gat_ind), len(all_gat_ind))
    for i in range(len(loc_gat_ind)):
      self.assertEqual(loc_gat_ind[i], all_gat_ind[i])

    # For each gathered index, verify that the value matches the local one.
    local_checked = [False] * len(local_gathered.indices)
    for i in range(len(all_gathered.indices)):
      all_index = all_gathered.indices[i]
      # TODO(jthestness): Make this lookup quicker using sorting
      # (see the sketch after this method).
      loc_index = -1
      for j in range(len(local_gathered.indices)):
        if local_gathered.indices[j] == all_index and not local_checked[j]:
          loc_index = j
          local_checked[j] = True
          break
      self.assertGreaterEqual(loc_index, 0)
      correct_output = local_gathered.values[loc_index][0]
      if average_allgather:
        correct_output = correct_output / float(num_ranks)
      self.assertEqual(all_gathered.values[i][0], correct_output)
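
  # A sketch of the quicker lookup suggested by the TODO in checkAllgather
  # (one reasonable implementation under assumptions, not part of the
  # original test; `checkAllgatherSorted` is a hypothetical helper name).
  # Sorting both sides by (index, value) pairs duplicate indices
  # deterministically and replaces the O(n^2) scan with an O(n log n) pass.
  def checkAllgatherSorted(self, num_ranks, all_gathered, local_gathered):
    # np.lexsort sorts by the last key first, so these orderings are by
    # index, with the first value column breaking ties among duplicates.
    all_order = np.lexsort((all_gathered.values[:, 0], all_gathered.indices))
    loc_order = np.lexsort((local_gathered.values[:, 0],
                            local_gathered.indices))
    self.assertEqual(len(all_order), len(loc_order))
    for a, l in zip(all_order, loc_order):
      self.assertEqual(all_gathered.indices[a], local_gathered.indices[l])
      correct_output = local_gathered.values[l][0]
      if average_allgather:
        correct_output = correct_output / float(num_ranks)
      self.assertEqual(all_gathered.values[a][0], correct_output)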


  def test_mpi_allgather(self):
    # Get the MPI rank and world size from the environment.
    my_rank = int(os.environ['PMI_RANK'])
    num_ranks = int(os.environ['PMI_SIZE'])
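    # NOTE: PMI_RANK and PMI_SIZE are set by MPICH-style process managers
    # such as Hydra; other launchers use different variables (Open MPI, for
    # example, sets OMPI_COMM_WORLD_RANK and OMPI_COMM_WORLD_SIZE).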

    indices_per_rank = 100
    tensor_width = 10

    # Create IndexedSlices for each rank, some with overlapping indices.
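    # (Rank r produces the first 100 multiples of r + 1, so e.g. ranks 0
    # and 1 both emit index 2.)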
    to_gather_indices = []
    to_gather_values = []
    to_gather = []
    for rank_id in range(num_ranks):
      indices = []
      values = []
      my_multiple = rank_id + 1
      current_index = my_multiple
      for i in range(indices_per_rank):
        indices.append(current_index)
        # Each slice's value is its own index replicated across the row.
        values.append(tf.fill([tensor_width], float(current_index)))
        current_index += my_multiple
      concat_ind = tf.stack(indices)
      concat_vals = tf.stack(values)
      to_gather_indices.append(concat_ind)
      to_gather_values.append(concat_vals)
      to_gather.append(tf.IndexedSlices(concat_vals, concat_ind))

    # Concatenate every rank's IndexedSlices locally to build the expected
    # allgather output; this works because each rank deterministically
    # constructs the inputs of all ranks in the loop above.
    correct_gather_indices = tf.concat(to_gather_indices, 0)
    correct_gather_values = tf.concat(to_gather_values, 0)
    correct_gather = tf.IndexedSlices(correct_gather_values,
                                      correct_gather_indices)

    all_gather = mpi.allreduce(to_gather[my_rank], average_allgather)
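    # NOTE: mpi.allreduce is used here because, given a tf.IndexedSlices
    # input, mpi_collectives gathers the indices and values across ranks
    # rather than reducing a dense tensor; that gather is what is under test.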

    # NOTE: This assumes that device IDs are numbered the same as ranks.
    gpu_options = tf.GPUOptions(visible_device_list=str(my_rank))
    config = tf.ConfigProto(gpu_options=gpu_options)

    # MPI Session to test allgather.
    with mpi.Session(config=config) as sess:
      sess.run(tf.global_variables_initializer())

      all_gathered, local_gathered = sess.run([all_gather, correct_gather])

      # Compare all_gathered with local_gathered.
      self.checkAllgather(num_ranks, all_gathered, local_gathered)


if __name__ == '__main__':
  test.main()