aboutsummaryrefslogtreecommitdiffhomepage
path: root/tensorflow/core/protobuf/rewriter_config.proto
blob: 029b27cd04370516404b00b7583914057087fe05 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
syntax = "proto3";

package tensorflow;
option cc_enable_arenas = true;
option java_outer_classname = "RewriterConfigProtos";
option java_multiple_files = true;
option java_package = "org.tensorflow.framework";

import "tensorflow/core/framework/attr_value.proto";

// Options for the AutoParallel optimization pass (see
// RewriterConfig.auto_parallel).
message AutoParallelOptions {
  // If true, the AutoParallel pass is enabled.
  bool enable = 1;
  // Number of replicas the pass should target. Semantics of replication are
  // defined by the AutoParallel optimizer itself.
  int32 num_replicas = 2;
}

message RewriterConfig {
  // Graph rewriting is experimental and subject to change, not covered by any
  // API stability guarantees.

  // Configuration options for the meta-optimizer. Unless otherwise noted, these
  // configuration options do not apply to explicitly triggered optimization
  // passes in the optimizers field.

  // Tri-state (plus aggressive) switch used by the individual optimizer
  // fields below. Each field's own comment states what DEFAULT means for it.
  enum Toggle {
    // Use the optimizer's documented default (see the per-field comments).
    DEFAULT = 0;
    // Force the optimizer on.
    ON = 1;
    // Force the optimizer off.
    OFF = 2;
    // Enable some aggressive optimizations that use assumptions that TF graphs
    // may break. For example, assume the shape of a placeholder matches its
    // actual feed.
    AGGRESSIVE = 3;
  }

  // Enum controlling the number of times to run optimizers. The default is to
  // run them once.
  enum NumIterationsType {
    // Use the default number of iterations (once).
    DEFAULT_NUM_ITERS = 0;
    // Run the optimizers a single time.
    ONE = 1;
    // Run the optimizers twice.
    TWO = 2;
  }

  // Optimize tensor layouts (default is ON)
  // e.g. This will try to use NCHW layout on GPU which is faster.
  Toggle layout_optimizer = 1;
  // Fold constants (default is ON)
  // Statically infer the value of tensors when possible, and materialize the
  // result using constants.
  Toggle constant_folding = 3;
  // Arithmetic optimizations (default is ON)
  // e.g. Simplify arithmetic ops; merge ops with same value (like constants).
  Toggle arithmetic_optimization = 7;
  // Control dependency optimizations (default is ON).
  // Remove redundant control dependencies, which may enable other optimization.
  Toggle dependency_optimization = 8;
  // Loop optimizations (default is ON).
  Toggle loop_optimization = 9;
  // Function optimizations (default is ON).
  Toggle function_optimization = 10;
  // Strips debug-related nodes from the graph (off by default).
  Toggle debug_stripper = 11;
  // If true, don't remove unnecessary ops from the graph
  bool disable_model_pruning = 2;

  // Controls how many times we run the optimizers in meta optimizer (default
  // is once).
  NumIterationsType meta_optimizer_iterations = 12;

  // Selects which memory optimization strategy the meta-optimizer applies.
  enum MemOptType {
    // The default setting (SCHEDULING and SWAPPING HEURISTICS only)
    DEFAULT_MEM_OPT = 0;
    // Disabled in the meta-optimizer.
    NO_MEM_OPT = 1;
    // Driven by manual op-level annotations.
    MANUAL = 2;

    // Driven by heuristics. The behavior of these heuristics is subject to
    // change. Currently includes an experimental recomputation and swapping
    // heuristics. Manual annotations are respected, but additional nodes are
    // selected automatically.

    // Swapping heuristic will move a tensor from the GPU to the CPU and move
    // it back when needed to reduce peak memory usage.
    SWAPPING_HEURISTICS = 4;
    // Recomputation heuristics will recompute ops (such as Relu activation)
    // during backprop instead of storing them, reducing peak memory usage.
    RECOMPUTATION_HEURISTICS = 5;
    // Scheduling will split big ops such as AddN and try to enforce a schedule
    // of the new computations that decreases peak memory usage.
    SCHEDULING_HEURISTICS = 6;
    // Use any combination of swapping and recomputation heuristics.
    HEURISTICS = 3;
  }
  // Configures memory optimization passes through the meta-optimizer. Has no
  // effect on manually requested memory optimization passes in the optimizers
  // field.
  MemOptType memory_optimization = 4;
  // A node name scope for node names which are valid outputs of recomputations.
  // Inputs to nodes that match this scope may be recomputed (subject either to
  // manual annotation of those input nodes or to manual annotation and
  // heuristics depending on memory_optimization), but the nodes themselves will
  // not be recomputed. This matches any sub-scopes as well, meaning the scope
  // can appear not just as a top-level scope. For example, if the value is
  // "gradients/", the default, it will match node name "gradients/foo",
  // "foo/gradients/bar", but not "foo_gradients/"
  string memory_optimizer_target_node_name_scope = 6;

  // Configures AutoParallel optimization passes either through the
  // meta-optimizer or when manually specified through the optimizers field.
  AutoParallelOptions auto_parallel = 5;

  // If non-empty, will use this as an alternative way to specify a list of
  // optimizations to turn on and the order of the optimizations (replacing the
  // meta-optimizer).
  //
  // Of the RewriterConfig options, only the AutoParallel configuration options
  // (the auto_parallel field) apply to manually requested optimization passes
  // ("autoparallel"). Memory optimization passes ("memory") invoked here are
  // not configurable (in contrast to memory optimization passes through the
  // meta-optimizer) and act only on manual op annotations.
  //
  // Custom registered optimizers will be run after the base optimizers, in
  // the order that they are specified.
  repeated string optimizers = 100;

  // Message to describe custom graph optimizer and its parameters
  message CustomGraphOptimizer {
    // Registered name of the custom optimizer to run.
    string name = 1;
    // Optimizer-specific configuration, keyed by parameter name.
    map<string, AttrValue> parameter_map = 2;
  }

  // list of CustomGraphOptimizers to apply.
  repeated CustomGraphOptimizer custom_optimizers = 200;
}