diff options
author     Sherry Moore <sherrym@google.com>              2016-03-04 13:45:13 -0800
committer  TensorFlower Gardener <gardener@tensorflow.org>  2016-03-04 14:35:40 -0800
commit     1442f04dfbd715ead1d32b6395f5346e5256578f (patch)
tree       f825c2f71f5b0c27106599a60fa4516495093889 /tensorflow/core/protobuf
parent     2cfc4405b140808efd6896fedfb28d21d1b81d04 (diff)
Moved config.proto from core/framework to core/protobuf.
Change: 116396958
Diffstat (limited to 'tensorflow/core/protobuf')
-rw-r--r--  tensorflow/core/protobuf/config.proto             159
-rw-r--r--  tensorflow/core/protobuf/master.proto               2
-rw-r--r--  tensorflow/core/protobuf/tensorflow_server.proto    2
-rw-r--r--  tensorflow/core/protobuf/worker.proto               2
4 files changed, 162 insertions, 3 deletions
diff --git a/tensorflow/core/protobuf/config.proto b/tensorflow/core/protobuf/config.proto
new file mode 100644
index 0000000000..c6da869cce
--- /dev/null
+++ b/tensorflow/core/protobuf/config.proto
@@ -0,0 +1,159 @@
+syntax = "proto3";
+
+package tensorflow;
+// option cc_enable_arenas = true;
+option java_outer_classname = "ConfigProtos";
+option java_multiple_files = true;
+option java_package = "org.tensorflow.framework";
+
+import "tensorflow/core/framework/step_stats.proto";
+
+message GPUOptions {
+  // A value between 0 and 1 that indicates what fraction of the
+  // available GPU memory to pre-allocate for each process. 1 means
+  // to pre-allocate all of the GPU memory, 0.5 means the process
+  // allocates ~50% of the available GPU memory.
+  double per_process_gpu_memory_fraction = 1;
+
+  // The type of GPU allocation strategy to use.
+  //
+  // Allowed values:
+  // "": The empty string (default) uses a system-chosen default
+  // which may change over time.
+  //
+  // "BFC": A "Best-fit with coalescing" algorithm, simplified from a
+  // version of dlmalloc.
+  string allocator_type = 2;
+
+  // Delay deletion of up to this many bytes to reduce the number of
+  // interactions with gpu driver code. If 0, the system chooses
+  // a reasonable default (several MBs).
+  int64 deferred_deletion_bytes = 3;
+};
+
+// Options passed to the graph optimizer
+message OptimizerOptions {
+  // If true, optimize the graph using common subexpression elimination.
+  bool do_common_subexpression_elimination = 1;
+
+  // If true, perform constant folding optimization on the graph.
+  bool do_constant_folding = 2;
+
+  // If true, perform function inlining on the graph.
+  bool do_function_inlining = 4;
+
+  // Optimization level
+  enum Level {
+    // L1 is the default level.
+    // Optimization performed at L1 :
+    // 1. Common subexpression elimination
+    L1 = 0;
+
+    // Optimization performed at L2 :
+    // 1. Common subexpression elimination
+    // 2. Constant folding
+    L2 = 2;
+
+    // No optimizations
+    L0 = -1;
+  }
+
+  Level opt_level = 3;
+}
+
+message GraphOptions {
+  // Removed, use optimizer_options below.
+  reserved "skip_common_subexpression_elimination";
+  reserved 1;
+
+  // If true, use control flow to schedule the activation of Recv nodes.
+  // (Currently ignored.)
+  bool enable_recv_scheduling = 2;
+
+  // Options controlling how graph is optimized.
+  OptimizerOptions optimizer_options = 3;
+};
+
+// Session configuration parameters.
+// The system picks an appropriate values for fields that are not set.
+message ConfigProto {
+  // Map from device type name (e.g., "CPU" or "GPU" ) to maximum
+  // number of devices of that type to use. If a particular device
+  // type is not found in the map, the system picks an appropriate
+  // number.
+  map<string, int32> device_count = 1;
+
+  // The execution of an individual op (for some op types) can be
+  // parallelized on a pool of intra_op_parallelism_threads.
+  // 0 means the system picks an appropriate number.
+  int32 intra_op_parallelism_threads = 2;
+
+  // Nodes that perform blocking operations are enqueued on a pool of
+  // inter_op_parallelism_threads available in each process.
+  //
+  // 0 means the system picks an appropriate number.
+  //
+  // Note that the first Session created in the process sets the
+  // number of threads for all future sessions unless use_per_session_threads is
+  // true.
+  int32 inter_op_parallelism_threads = 5;
+
+  // If true, use a new set of threads for this session rather than the global
+  // pool of threads. Only supported by direct sessions.
+  //
+  // If false, use the global threads created by the first session.
+  bool use_per_session_threads = 9;
+
+  // Assignment of Nodes to Devices is recomputed every placement_period
+  // steps until the system warms up (at which point the recomputation
+  // typically slows down automatically).
+  int32 placement_period = 3;
+
+  // When any filters are present sessions will ignore all devices which do not
+  // match the filters. Each filter can be partially specified, e.g. "/job:ps"
+  // "/job:worker/replica:3", etc.
+  repeated string device_filters = 4;
+
+  // Options that apply to all GPUs.
+  GPUOptions gpu_options = 6;
+
+  // Whether soft placement is allowed. If allow_soft_placement is true,
+  // an op will be placed on CPU if
+  // 1. there's no GPU implementation for the OP
+  // or
+  // 2. no GPU devices are known or registered
+  // or
+  // 3. need to co-locate with reftype input(s) which are from CPU.
+  bool allow_soft_placement = 7;
+
+  // Whether device placements should be logged.
+  bool log_device_placement = 8;
+
+  // Options that apply to all graphs.
+  GraphOptions graph_options = 10;
+
+  // Global timeout for all blocking operations in this session. If non-zero,
+  // and not overridden on a per-operation basis, this value will be used as the
+  // deadline for all blocking operations.
+  int64 operation_timeout_in_ms = 11;
+};
+
+// EXPERIMENTAL. Options for a single Run() call.
+message RunOptions {
+  enum TraceLevel {
+    NO_TRACE = 0;
+    FULL_TRACE = 1;
+  }
+  TraceLevel trace_level = 1;
+
+  // Time to wait for operation to complete in milliseconds.
+  int64 timeout_in_ms = 2;
+}
+
+// EXPERIMENTAL. Metadata output (i.e., non-Tensor) for a single Run() call.
+message RunOutputs {
+  // Statistics traced for this step. Populated if tracing is turned on via the
+  // "RunOptions" proto.
+  // EXPERIMENTAL: The format and set of events may change in future versions.
+  StepStats step_stats = 1;
+}
diff --git a/tensorflow/core/protobuf/master.proto b/tensorflow/core/protobuf/master.proto
index e46581bdab..235bad8655 100644
--- a/tensorflow/core/protobuf/master.proto
+++ b/tensorflow/core/protobuf/master.proto
@@ -21,10 +21,10 @@ option java_outer_classname = "DistributedRuntimeProtos";
 option java_multiple_files = true;
 option java_package = "org.tensorflow.distruntime";
 
-import "tensorflow/core/framework/config.proto";
 import "tensorflow/core/framework/device_attributes.proto";
 import "tensorflow/core/framework/graph.proto";
 import "tensorflow/core/framework/tensor.proto";
+import "tensorflow/core/protobuf/config.proto";
 
 ////////////////////////////////////////////////////////////////////////////////
 //
diff --git a/tensorflow/core/protobuf/tensorflow_server.proto b/tensorflow/core/protobuf/tensorflow_server.proto
index 5580e97920..5b4ee3e85a 100644
--- a/tensorflow/core/protobuf/tensorflow_server.proto
+++ b/tensorflow/core/protobuf/tensorflow_server.proto
@@ -15,7 +15,7 @@ limitations under the License.
 
 syntax = "proto3";
 
-import "tensorflow/core/framework/config.proto";
+import "tensorflow/core/protobuf/config.proto";
 
 package tensorflow;
 // option cc_enable_arenas = true;
diff --git a/tensorflow/core/protobuf/worker.proto b/tensorflow/core/protobuf/worker.proto
index bb01b65d8b..8469c3823b 100644
--- a/tensorflow/core/protobuf/worker.proto
+++ b/tensorflow/core/protobuf/worker.proto
@@ -22,11 +22,11 @@ option java_multiple_files = true;
 option java_package = "org.tensorflow.distruntime";
 
 import "google/protobuf/any.proto";
-import "tensorflow/core/framework/config.proto";
 import "tensorflow/core/framework/step_stats.proto";
 import "tensorflow/core/framework/device_attributes.proto";
 import "tensorflow/core/framework/graph.proto";
 import "tensorflow/core/framework/tensor.proto";
+import "tensorflow/core/protobuf/config.proto";
 
 ////////////////////////////////////////////////////////////////////////////////
 //