author    A. Unique TensorFlower <gardener@tensorflow.org>  2018-06-08 11:18:23 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>   2018-06-08 11:21:46 -0700
commit    ebb67e0d7da53b3b848630e63aaa80f1283d83bd (patch)
tree      6d0d2c8886f7da5b20b53215426f37201bb85212
parent    e8ca21f1533361aaad5acf1738239266b95dae12 (diff)
Delete deprecated protos.
PiperOrigin-RevId: 199822232
-rw-r--r--  tensorflow/compiler/xla/rpc/xla_service.proto   16
-rw-r--r--  tensorflow/compiler/xla/xla.proto               94
-rw-r--r--  tensorflow/compiler/xla/xla_data.proto         390
3 files changed, 1 insertion(+), 499 deletions(-)
diff --git a/tensorflow/compiler/xla/rpc/xla_service.proto b/tensorflow/compiler/xla/rpc/xla_service.proto
index 92eb19ec0f..551ae895e0 100644
--- a/tensorflow/compiler/xla/rpc/xla_service.proto
+++ b/tensorflow/compiler/xla/rpc/xla_service.proto
@@ -115,10 +115,6 @@ service XlaService {
returns (ComputeConstantResponse) {
}
- // Retrieves the inferred shape for a value within a computation.
- rpc GetLocalShape(GetLocalShapeRequest) returns (GetLocalShapeResponse) {
- }
-
// Requests one or more device handles from the target. The returned device
// handles can be used to specify the device on which to execute computations
// or transfer data.
@@ -132,18 +128,6 @@ service XlaService {
returns (CreateChannelHandleResponse) {
}
- // Requests that the referenced computation be specialized for the provided
- // arguments for subsequent execution. This permits things such as value
- // specialization.
- rpc Specialize(SpecializeRequest) returns (SpecializeResponse) {
- }
-
- // Modifies the provided computation so that subsequent executions
- // will compute the provided ComputationDataHandle, rather than the
- // last expression enqueued on that Computation.
- rpc SetReturnValue(SetReturnValueRequest) returns (SetReturnValueResponse) {
- }
-
// Invokes the provided computation with the provided global data passed as
// immutable arguments. The request contains the whole computation graph.
// Returns global data output and execution timing.
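The comment above describes the graph-based Execute path that survives this change: the request carries the whole computation as an HloModuleProto rather than a handle into server-side state. For orientation only, here is a text-format sketch of such a request; the module name and handle value are made-up illustrative values, and the HLO instruction list is elided:

  # ExecuteGraphRequest in proto text format (illustrative sketch,
  # not taken from this commit)
  computation {        # HloModuleProto; instruction list elided
    name: "add_one"
  }
  arguments {          # GlobalDataHandle of previously transferred data
    handle: 1
  }
  execution_options {} # see ExecutionOptions in xla.proto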
diff --git a/tensorflow/compiler/xla/xla.proto b/tensorflow/compiler/xla/xla.proto
index 53ba120d21..6f07e4606b 100644
--- a/tensorflow/compiler/xla/xla.proto
+++ b/tensorflow/compiler/xla/xla.proto
@@ -225,14 +225,6 @@ message ExecutionOptions {
repeated DeviceHandle device_handles = 5;
}
-message SnapshotComputationRequest {
- ComputationHandle computation = 1;
-}
-
-message LoadComputationSnapshotResponse {
- ComputationHandle computation = 1;
-}
-
message GetDeviceHandlesRequest {
int64 device_count = 1;
}
@@ -291,11 +283,6 @@ message ResetDeviceRequest {
message ResetDeviceResponse {
}
-message ComputationStatsRequest {
- ComputationHandle computation = 1;
- DebugOptions debug_options = 2;
-}
-
message ComputationGraphStatsRequest {
HloModuleProto computation = 1;
DebugOptions debug_options = 2;
@@ -305,14 +292,6 @@ message ComputationStatsResponse {
ComputationStats stats = 1;
}
-message ComputationRequest {
- string name = 1;
-}
-
-message ComputationResponse {
- ComputationHandle computation = 1;
-}
-
message CreateChannelHandleRequest {
}
@@ -327,24 +306,6 @@ message UnregisterRequest {
message UnregisterResponse {
}
-message SetReturnValueRequest {
- ComputationHandle computation = 1;
- ComputationDataHandle operand = 2;
-}
-
-message SetReturnValueResponse {
-}
-
-message ExecuteRequest {
- reserved 3, 4;
-
- ComputationHandle computation = 1;
- repeated GlobalDataHandle arguments = 2;
-
- // Options that affect how XLA compiles and runs code to service this request.
- ExecutionOptions execution_options = 5;
-}
-
message ExecuteGraphRequest {
HloModuleProto computation = 1;
repeated GlobalDataHandle arguments = 2;
@@ -353,10 +314,6 @@ message ExecuteGraphRequest {
ExecutionOptions execution_options = 3;
}
-message ExecuteParallelRequest {
- repeated ExecuteRequest requests = 1;
-}
-
message ExecuteGraphParallelRequest {
repeated ExecuteGraphRequest requests = 1;
}
@@ -370,21 +327,6 @@ message ExecuteParallelResponse {
repeated ExecuteResponse responses = 1;
}
-message ExecuteAsyncRequest {
- reserved 3, 4;
-
- ComputationHandle computation = 1;
- repeated GlobalDataHandle arguments = 2;
-
- // Options that affect how XLA compiles and runs code to service this request.
- ExecutionOptions execution_options = 6;
-}
-
-message ExecuteAsyncResponse {
- // A handle to the execution launched asynchronously.
- ExecutionHandle execution = 1;
-}
-
message WaitForExecutionRequest {
ExecutionHandle execution = 1;
}
@@ -394,31 +336,13 @@ message WaitForExecutionResponse {
ExecutionProfile profile = 2;
}
-message IsConstantRequest {
- ComputationHandle computation = 1;
- ComputationDataHandle operand = 2;
- int64 num_parameters = 3;
-}
-
-message IsConstantResponse {
- bool is_constant = 1;
-}
-
-message ComputeConstantRequest {
- ComputationHandle computation = 1;
- ComputationDataHandle operand = 2;
- Layout output_layout = 3;
- repeated LiteralProto parameters = 4;
-}
-
message ComputeConstantGraphRequest {
HloModuleProto computation = 1;
Layout output_layout = 2;
}
message ComputeConstantResponse {
- // A LiteralProto is returned directly for this request, instead of a
- // ComputationDataHandle.
+ // A LiteralProto is returned directly for this request.
LiteralProto literal = 1;
}
@@ -460,14 +384,6 @@ message LoadDataResponse {
int64 nanoseconds = 5;
}
-message SpecializeRequest {
- ComputationHandle computation = 1;
- repeated GlobalDataHandle arguments = 2;
-}
-
-message SpecializeResponse {
-}
-
message GetShapeRequest {
GlobalDataHandle data = 1;
}
@@ -476,14 +392,6 @@ message GetShapeResponse {
Shape shape = 1;
}
-message GetComputationShapeRequest {
- ComputationHandle computation = 1;
-}
-
-message GetComputationShapeResponse {
- ProgramShape program_shape = 1;
-}
-
message UnpackRequest {
GlobalDataHandle data = 1;
}
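Each deleted xla.proto message whose job could be done against a whole computation graph has a graph-based counterpart that remains after this patch; the rest are removed outright along with the handle-based session API. The mapping, reconstructed from the deletions above:

  // Deleted (handle-based)        Surviving graph-based replacement
  // ExecuteRequest             -> ExecuteGraphRequest
  // ExecuteParallelRequest     -> ExecuteGraphParallelRequest
  // ComputationStatsRequest    -> ComputationGraphStatsRequest
  // ComputeConstantRequest     -> ComputeConstantGraphRequest
  //
  // SnapshotComputationRequest, LoadComputationSnapshotResponse,
  // ComputationRequest/Response, SetReturnValueRequest/Response,
  // ExecuteAsyncRequest/Response, IsConstantRequest/Response,
  // SpecializeRequest/Response and GetComputationShapeRequest/Response
  // are deleted with no direct replacement: they only made sense
  // against a server-held ComputationHandle.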
diff --git a/tensorflow/compiler/xla/xla_data.proto b/tensorflow/compiler/xla/xla_data.proto
index 6bdfb0179c..963d3836ed 100644
--- a/tensorflow/compiler/xla/xla_data.proto
+++ b/tensorflow/compiler/xla/xla_data.proto
@@ -276,12 +276,6 @@ message ExecutionProfile {
int64 compute_and_transfer_time_ns = 5;
}
-// Handle given to a user that represents a computation that the user builds up
-// before execution.
-message ComputationHandle {
- int64 handle = 1;
-}
-
// Handle given to a user that represents an execution that the user launched
// asynchronously on the device.
message ExecutionHandle {
@@ -295,13 +289,6 @@ message GlobalDataHandle {
int64 handle = 1;
}
-// Handle given to a user that represents a data result in a computation.
-// This is used to pass to subsequent computations that depends upon the data as
-// an operand.
-message ComputationDataHandle {
- int64 handle = 1;
-}
-
// Handle given to a user that represents a replicated virtual device. Each
// replicated device represents N physical devices for execution where N is the
// number of replicas.
@@ -441,44 +428,6 @@ message GatherDimensionNumbers {
int64 index_vector_dim = 4;
}
-// Operation requests that are all collected as a tagged union with a oneof
-// field in OpRequest.
-
-message ConstantRequest {
- LiteralProto literal = 2;
-}
-
-message GetTupleElementRequest {
- ComputationDataHandle operand = 2;
- int64 index = 3;
-}
-
-message SliceRequest {
- ComputationDataHandle operand = 2;
- repeated int64 start_indices = 3;
- repeated int64 limit_indices = 4;
- repeated int64 strides = 5;
-}
-
-message DynamicSliceRequest {
- // Operand from which to slice at dynamic 'start_indices'.
- ComputationDataHandle operand = 2;
- // Dynamically computed 'start_indices' for slice operation.
- ComputationDataHandle start_indices = 3;
- // Slice sizes for each dimension (note that indices calculations are computed
- // modulo dimension sizes to avoid out-of-bound array accesses).
- repeated int64 slice_sizes = 4;
-}
-
-message DynamicUpdateSliceRequest {
- // Operand on which slice 'update' is to be applied.
- ComputationDataHandle operand = 2;
- // The slice update to apply to 'operand'.
- ComputationDataHandle update = 3;
- // Dynamically computed start indices for the update slice operation.
- ComputationDataHandle start_indices = 4;
-}
-
message ConvolutionDimensionNumbers {
// The number of the dimension that represents batch in the input.
int64 input_batch_dimension = 7;
@@ -516,13 +465,6 @@ message ConvolutionDimensionNumbers {
// Next = 13
};
-message ConvolveRequest {
- ComputationDataHandle lhs = 2;
- ComputationDataHandle rhs = 3; // This is the filter/kernel.
- Window window = 4; // Describes the filter/kernel.
- ConvolutionDimensionNumbers dimension_numbers = 5;
-}
-
enum FftType {
FFT = 0; // Forward FFT; complex in, complex out.
IFFT = 1; // Inverse FFT; complex in, complex out.
@@ -531,56 +473,6 @@ enum FftType {
// fft_length real out
}
-message FftRequest {
- FftType fft_type = 1;
- repeated int64 fft_length = 2; // Multivalent for higher-order FFT.
- ComputationDataHandle operand = 3;
-}
-
-message InfeedRequest {
- // The shape of the data returned by reading the device's infeed buffer.
- Shape shape = 2;
-
- // Additional infeed configuration for the backend.
- bytes config = 3;
-}
-
-message OutfeedRequest {
- // The shape of the data returned by reading the device's outfeed buffer.
- Shape shape = 1;
-
- // Operand to the Outfeed. Supports tuple.
- ComputationDataHandle operand = 2;
-
- // Backend-specific information for how to perform the outfeed.
- bytes outfeed_config = 3;
-}
-
-message CallRequest {
- ComputationHandle to_apply = 2;
- repeated ComputationDataHandle operands = 3;
-}
-
-message CustomCallRequest {
- string call_target_name = 2;
- repeated ComputationDataHandle operands = 3;
- Shape shape = 4;
-}
-
-message HostComputeRequest {
- // Operand to the HostCompute. Supports tuple.
- repeated ComputationDataHandle operands = 1;
-
- // Name used to identify HostSend/Recv channels.
- string channel_name = 2;
-
- // Cost estimate in nanoseconds.
- int64 cost_estimate_ns = 3;
-
- // The shape of any data returned by host.
- Shape shape = 4;
-}
-
message DotDimensionNumbers {
// The dimension numbers that represent the 'lhs' contracting dimensions.
repeated int64 lhs_contracting_dimensions = 1;
@@ -592,179 +484,6 @@ message DotDimensionNumbers {
repeated int64 rhs_batch_dimensions = 4;
};
-message DotRequest {
- ComputationDataHandle lhs = 2;
- ComputationDataHandle rhs = 3;
- DotDimensionNumbers dimension_numbers = 4;
-}
-
-message MapRequest {
- repeated ComputationDataHandle operands = 2;
- ComputationHandle to_apply = 3;
- repeated ComputationDataHandle static_operands = 4;
- // The dimensions over which to map.
- // Example mapping a Dot operation along the batch dimension 0:
- // operand0.shape = [2, 2, 2], operand1.shape = [2,2,3]
- // Map({operand0, operand1}, Dot, {0})
- repeated int64 dimensions = 5;
-}
-
-message ReduceRequest {
- // Operand to the reduction.
- ComputationDataHandle operand = 2;
-
- // Initial value for the reduction. This must be consistent with the result
- // shape of to_apply.
- ComputationDataHandle init_value = 3;
-
- // The dimensions to reduce over.
- repeated int64 dimensions = 4;
-
- // The computation to apply in the reduction.
- ComputationHandle to_apply = 5;
-}
-
-message ReduceWindowRequest {
- ComputationDataHandle operand = 2;
- ComputationDataHandle init_value = 3;
- Window window = 4;
- ComputationHandle to_apply = 5;
-}
-
-message BatchNormTrainingRequest {
- ComputationDataHandle operand = 1;
- ComputationDataHandle scale = 2;
- ComputationDataHandle offset = 3;
- float epsilon = 4;
- int64 feature_index = 5;
-}
-
-message BatchNormInferenceRequest {
- ComputationDataHandle operand = 1;
- ComputationDataHandle scale = 2;
- ComputationDataHandle offset = 3;
- ComputationDataHandle mean = 4;
- ComputationDataHandle variance = 5;
- float epsilon = 6;
- int64 feature_index = 7;
-}
-
-message BatchNormGradRequest {
- ComputationDataHandle operand = 1;
- ComputationDataHandle scale = 2;
- ComputationDataHandle mean = 3;
- ComputationDataHandle variance = 4;
- ComputationDataHandle grad_output = 5;
- float epsilon = 6;
- int64 feature_index = 7;
-}
-
-message CrossReplicaSumRequest {
- ComputationDataHandle operand = 2;
-}
-
-message SelectAndScatterRequest {
- // Operand array on which the windows slide.
- ComputationDataHandle operand = 2;
-
- // Source array for the data to scatter.
- ComputationDataHandle source = 3;
-
- // Initial scalar value for each element in the output.
- ComputationDataHandle init_value = 4;
-
- // Window configuration.
- Window window = 5;
-
- // Binary function used to select an element from each window.
- ComputationHandle select = 6;
-
- // Binary function used to combine each scattered value from source with the
- // current output value at the selected location.
- ComputationHandle scatter = 7;
-}
-
-message ReverseRequest {
- ComputationDataHandle operand = 2;
- repeated int64 dimensions = 3;
-}
-
-message BroadcastRequest {
- ComputationDataHandle operand = 2;
- repeated int64 broadcast_sizes = 3;
-}
-
-message PadRequest {
- ComputationDataHandle operand = 2;
- ComputationDataHandle padding_value = 3;
- PaddingConfig padding_config = 4;
-}
-
-message ReshapeRequest {
- ComputationDataHandle operand = 2;
-
- // The dimension order for collapse (from fastest-changing to slowest).
- repeated int64 dimensions = 3;
-
- // The new dimension sizes (from dimension 0 to n-1).
- repeated int64 new_sizes = 4;
-}
-
-message TransposeRequest {
- ComputationDataHandle operand = 2;
-
- // The permutation of the operand's dimensions (in the range 0 to n-1).
- repeated int64 dimensions = 3;
-}
-
-message ParameterRequest {
- Shape shape = 2;
- int64 parameter = 3;
- string name = 4;
-}
-
-message GetLocalShapeRequest {
- ComputationHandle computation = 1;
- ComputationDataHandle operand = 2;
-}
-
-message GetLocalShapeResponse {
- Shape shape = 1;
-}
-
-message TraceRequest {
- string tag = 2;
- ComputationDataHandle operand = 3;
-}
-
-message ConvertRequest {
- ComputationDataHandle operand = 2;
- PrimitiveType new_element_type = 3;
-}
-
-message ConcatenateRequest {
- repeated ComputationDataHandle operands = 2;
- // The dimension in which we concatenate; e.g. if you had dimension arrays of
- // [4, 1] and [5, 1], you'd concatenate in dimension 0 to produce a [9, 1].
- // Attempting to concatenate those in dimension 1 would produce an error, as
- // 4 != 5 (and there is no ragged array support).
- int64 dimension = 3;
-}
-
-message ConditionalRequest {
- ComputationDataHandle predicate = 2;
- ComputationDataHandle true_operand = 3;
- ComputationHandle true_computation = 4;
- ComputationDataHandle false_operand = 5;
- ComputationHandle false_computation = 6;
-}
-
-message WhileRequest {
- ComputationHandle condition = 2;
- ComputationHandle body = 3;
- ComputationDataHandle init = 4;
-}
-
enum UnaryOperation {
UNOP_INVALID = 0;
@@ -827,11 +546,6 @@ enum UnaryOperation {
UNOP_LOG1P = 19;
}
-message UnaryOpRequest {
- UnaryOperation unop = 2;
- ComputationDataHandle operand = 3;
-}
-
enum BinaryOperation {
BINOP_INVALID = 0;
@@ -876,13 +590,6 @@ enum BinaryOperation {
BINOP_ATAN2 = 24;
}
-message BinaryOpRequest {
- BinaryOperation binop = 2;
- ComputationDataHandle lhs = 3;
- ComputationDataHandle rhs = 4;
- repeated int64 broadcast_dimensions = 5;
-}
-
enum RandomDistribution {
RNG_INVALID = 0;
@@ -897,12 +604,6 @@ enum RandomDistribution {
// Next: 4
}
-message RngRequest {
- RandomDistribution distribution = 2;
- repeated ComputationDataHandle parameter = 3;
- Shape shape = 4;
-}
-
enum TernaryOperation {
TRIOP_INVALID = 0;
@@ -916,13 +617,6 @@ enum TernaryOperation {
TRIOP_CLAMP = 3;
}
-message TernaryOpRequest {
- TernaryOperation triop = 2;
- ComputationDataHandle lhs = 3;
- ComputationDataHandle rhs = 4;
- ComputationDataHandle ehs = 5;
-}
-
enum VariadicOperation {
VAROP_INVALID = 0;
@@ -930,34 +624,6 @@ enum VariadicOperation {
VAROP_TUPLE = 1;
}
-message VariadicOpRequest {
- VariadicOperation varop = 2;
- repeated ComputationDataHandle operands = 3;
-}
-
-message ReducePrecisionRequest {
- ComputationDataHandle operand = 1;
- int32 exponent_bits = 2;
- int32 mantissa_bits = 3;
-}
-
-message SendRequest {
- ComputationDataHandle operand = 1;
- ChannelHandle channel_handle = 2;
-}
-
-message RecvRequest {
- Shape shape = 1;
- ChannelHandle channel_handle = 2;
-}
-
-message GatherRequest {
- ComputationDataHandle input = 1;
- ComputationDataHandle gather_indices = 2;
- GatherDimensionNumbers dimension_numbers = 3;
- repeated int64 window_bounds = 4;
-}
-
message OpSharding {
enum Type {
// This sharding is replicated across all devices (implies maximal,
@@ -988,59 +654,3 @@ message OpSharding {
// to.
repeated OpSharding tuple_shardings = 5;
}
-
-message OpRequest {
- ComputationHandle computation = 1;
- OpMetadata metadata = 33;
- OpSharding sharding = 40;
-
- oneof op {
- BinaryOpRequest binary_op_request = 2;
- BroadcastRequest broadcast_request = 3;
- CallRequest call_request = 4;
- ConcatenateRequest concatenate_request = 5;
- ConstantRequest constant_request = 6;
- ConvertRequest convert_request = 7;
- ConvolveRequest convolve_request = 8;
- CrossReplicaSumRequest cross_replica_sum_request = 9;
- CustomCallRequest custom_call_request = 10;
- DotRequest dot_request = 43;
- DynamicSliceRequest dynamic_slice_request = 11;
- DynamicUpdateSliceRequest dynamic_update_slice_request = 12;
- GetTupleElementRequest get_tuple_element_request = 13;
- InfeedRequest infeed_request = 14;
- MapRequest map_request = 15;
- PadRequest pad_request = 16;
- ParameterRequest parameter_request = 17;
- ReducePrecisionRequest reduce_precision_request = 36;
- ReduceRequest reduce_request = 18;
- ReduceWindowRequest reduce_window_request = 19;
- ReshapeRequest reshape_request = 20;
- ReverseRequest reverse_request = 21;
- RngRequest rng_request = 22;
- SelectAndScatterRequest select_and_scatter_request = 23;
- SliceRequest slice_request = 24;
- TernaryOpRequest ternary_op_request = 25;
- TraceRequest trace_request = 26;
- TransposeRequest transpose_request = 34;
- UnaryOpRequest unary_op_request = 27;
- VariadicOpRequest variadic_op_request = 28;
- WhileRequest while_request = 29;
- SendRequest send_request = 30;
- RecvRequest recv_request = 31;
- OutfeedRequest outfeed_request = 32;
- BatchNormTrainingRequest batch_norm_training_request = 35;
- BatchNormGradRequest batch_norm_grad_request = 37;
- BatchNormInferenceRequest batch_norm_inference_request = 38;
- FftRequest fft_request = 41;
- ConvertRequest bitcast_convert_request = 42;
- ConditionalRequest conditional_request = 44;
- HostComputeRequest host_compute_request = 45;
- GatherRequest gather_request = 46;
- // Next: 47
- }
-}
-
-message OpResponse {
- ComputationDataHandle output = 1;
-}
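The bulk of the xla_data.proto deletion is the OpRequest tagged union and its per-op request messages: the wire protocol by which clients used to build a computation one operation at a time against a server-held ComputationHandle. With those gone, graph construction happens entirely client-side and crosses the RPC boundary only as a complete HloModuleProto (see the ExecuteGraphRequest sketch after the xla_service.proto diff above). The handle messages that remain all follow the same single-int64 pattern; only the two deleted ones referred to server-side computation state. A sketch of the survivors, where GlobalDataHandle's field is verbatim from the diff and ExecutionHandle's is assumed to match it:

  // Kept in xla_data.proto after this change:
  message ExecutionHandle  { int64 handle = 1; }  // an async execution
  message GlobalDataHandle { int64 handle = 1; }  // data resident on a device
  message DeviceHandle     { ... }                // a replicated virtual device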