# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for Transpose op."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import time

import numpy as np

from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test


def build_graph(device, input_shape, perm, datatype, num_iters):
  """Builds a graph containing a sequence of transpose operations.

  Args:
    device: String, the device to run on.
    input_shape: Shape of the input tensor.
    perm: A list of ints with the same length as input tensor's dimension.
    datatype: numpy data type of the input tensor.
    num_iters: number of iterations to run transpose.

  Returns:
    An op that groups all the transpose outputs, suitable for session.run().
  """
  with ops.device("/%s:0" % device):
    total_size = np.prod(input_shape)
    # Deterministic input: values 1..total_size reshaped to the target shape.
    inp = np.arange(1, total_size + 1, dtype=datatype).reshape(input_shape)
    t = constant_op.constant(inp, shape=input_shape)

    outputs = []
    transpose_op = array_ops.transpose(t, perm)
    outputs.append(transpose_op)
    # Chain the remaining transposes via control dependencies so they run
    # sequentially rather than being scheduled concurrently or pruned.
    for _ in range(1, num_iters):
      with ops.control_dependencies([transpose_op]):
        transpose_op = array_ops.transpose(t, perm)
        outputs.append(transpose_op)
    return control_flow_ops.group(*outputs)


class TransposeBenchmark(test.Benchmark):
  """Benchmark transpose!"""

  def _run_graph(self, device, input_shape, perm, num_iters, datatype):
    """Runs the graph and prints its execution time.

    Args:
      device: String, the device to run on.
      input_shape: Shape of the input tensor.
      perm: A list of ints with the same length as input tensor's dimension.
      num_iters: Number of iterations to run the benchmark.
      datatype: numpy data type of the input tensor.

    Returns:
      The average duration of one transpose, in seconds.
    """
    graph = ops.Graph()
    with graph.as_default():
      outputs = build_graph(device, input_shape, perm, datatype, num_iters)
      with session_lib.Session(graph=graph) as session:
        variables.global_variables_initializer().run()
        # Warmup run: excludes one-time graph setup / kernel launch costs
        # from the timed run below.
        session.run(outputs)
        start_time = time.time()
        session.run(outputs)

        duration = (time.time() - start_time) / num_iters
        # Each transpose reads and writes the full tensor once, hence the
        # factor of 2 in the bytes-moved (GB/s) estimate.
        throughput = np.prod(
            np.array(input_shape)) * datatype().itemsize * 2 / duration / 1e9

        print("%s %s inputshape:%s perm:%s %d %.6fsec, %.4fGB/s." %
              (device, str(datatype), str(input_shape).replace(" ", ""),
               str(perm).replace(" ", ""), num_iters, duration, throughput))

    name_template = (
        "transpose_{device}_{dtype}_input_shape_{inputshape}_perm_{perm}")

    self.report_benchmark(
        name=name_template.format(
            device=device,
            dtype=str(datatype).replace(" ", ""),
            inputshape=str(input_shape).replace(" ", ""),
            perm=str(perm).replace(" ", "")).replace(" ", ""),
        iters=num_iters,
        wall_time=duration)

    return duration

  def benchmark_transpose(self):
    """Runs the transpose benchmark over a grid of shapes, perms and dtypes."""
    print("transpose benchmark:")

    datatypes = [np.complex128, np.float64, np.float32, np.float16, np.int8]

    # Shapes and perms are parallel lists: entry i of the perm list is
    # applied to entry i of the shape list.
    small_shapes = [[2, 20, 20, 20, 16], [2, 16, 20, 20, 20]] * 2
    small_shapes += [[2, 100, 100, 16], [2, 16, 100, 100]] * 2
    small_shapes += [[2, 5000, 16], [2, 16, 5000]] * 2
    small_perms = [[0, 4, 1, 2, 3], [0, 2, 3, 4, 1]] + [[4, 1, 2, 3, 0]] * 2
    small_perms += [[0, 3, 1, 2], [0, 2, 3, 1]] + [[3, 1, 2, 0]] * 2
    small_perms += [[0, 2, 1]] * 2 + [[2, 1, 0]] * 2

    large_shapes = [[2, 40, 40, 40, 32], [2, 40, 40, 40, 64]] * 2 + [[
        2, 300, 300, 32
    ], [2, 300, 300, 64]] * 2 + [[2, 100000, 32], [2, 100000, 64]] * 2
    large_perms = [[0, 4, 1, 2, 3], [0, 2, 3, 4, 1]] + [[4, 1, 2, 3, 0]] * 2 + [
        [0, 3, 1, 2], [0, 2, 3, 1]
    ] + [[3, 1, 2, 0]] * 2 + [[0, 2, 1]] * 2 + [[2, 1, 0]] * 2

    num_iters = 40
    for datatype in datatypes:
      for ishape, perm in zip(small_shapes, small_perms):
        self._run_graph("gpu", ishape, perm, num_iters, datatype)

      # Large shapes are skipped for complex128 and float16 (previously
      # expressed as two nested `is not` checks).
      if datatype not in (np.complex128, np.float16):
        for ishape, perm in zip(large_shapes, large_perms):
          self._run_graph("gpu", ishape, perm, num_iters, datatype)

    # 3-D shapes whose innermost dimension is small (3 or 8) stress a
    # different transpose code path; run them with more iterations since
    # each iteration is cheap.
    small_dim_large_shapes = [[2, 10000, 3], [2, 3, 10000], [2, 10000, 8],
                              [2, 8, 10000]]
    small_dim_small_shapes = [[2, 5000, 3], [2, 3, 5000], [2, 5000, 8],
                              [2, 8, 5000]]
    small_dim_perms = [[0, 2, 1]] * 4

    num_iters = 320
    small_dim_large_shape_datatypes = [np.float64, np.float32, np.int8]
    for datatype in small_dim_large_shape_datatypes:
      for ishape, perm in zip(small_dim_large_shapes, small_dim_perms):
        self._run_graph("gpu", ishape, perm, num_iters, datatype)

    small_dim_small_shape_datatypes = [np.complex128, np.float16]
    for datatype in small_dim_small_shape_datatypes:
      for ishape, perm in zip(small_dim_small_shapes, small_dim_perms):
        self._run_graph("gpu", ishape, perm, num_iters, datatype)


# Entry point: delegates to the TensorFlow benchmark/test runner, which
# discovers and executes the benchmark_* methods above.
if __name__ == "__main__":
  test.main()