// Copyright 2015 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package build.remote;

option java_package = "com.google.devtools.build.lib.remote";

message ContentDigest {
  // A hash of the contents of the file, blob, or tree node (see below).
  // The contents are digested using SHA1 (20 bytes).
  bytes digest = 1;
  int64 size_bytes = 2;
  // Will be 0 by default, and will increase if we ever change the hash
  // function used, etc.
  int32 version = 3;
  // And maybe other metadata later.
}
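
// Illustrative example (not part of the protocol): a ContentDigest for a
// 1 KiB blob, written in proto text format with a placeholder hash value.
// The digest field is assumed to hold the raw 20-byte SHA1 of the contents:
//   digest: "<20 raw SHA1 bytes>"
//   size_bytes: 1024
//   version: 0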

message FileMetadata {
  ContentDigest digest = 1;
  bool executable = 3;
}

message Platform {
  // The contents of this message will be determined later, with the purpose of
  // specifying everything that is required for the action execution, but cannot
  // be uploaded as part of the inputs.
  // It will likely contain fields such as architecture, vendor, OS + version,
  // ABI, etc.
}

// All the fields of this message have one thing in common: they are expected
// to define the result. Any instruction to remote execution that does not
// affect the result should be kept out of this message, because this message
// will be used as the Execution Cache key.
message Action {
  // The digest of the command (as an encoded proto blob).
  // Digested separately, because it is both long and reusable.
  ContentDigest command_digest = 1;
  // The Merkle digest of the root input directory.
  ContentDigest input_root_digest = 2;
  // Relative paths to the action's output files and directories.
  // The standard output and error stream contents are always returned
  // in addition (see ActionResult message).
  // Any unnamed output files will be discarded.
  // A path will be interpreted as a directory iff it has a trailing slash.
  // This list should be sorted!
  repeated string output_path = 4;
  Platform platform = 5;
  // All other requirements that affect the result of the action should be
  // here as well.
}
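
// Illustrative Action in proto text format (all paths and digests below are
// made up). Note the trailing slash that marks "logs/" as a directory output,
// and the sorted output_path list:
//   command_digest { digest: "<sha1 of the Command blob>" size_bytes: 142 }
//   input_root_digest { digest: "<sha1 of the root FileNode blob>" size_bytes: 89 }
//   output_path: "bazel-out/foo.o"
//   output_path: "logs/"
//   platform { }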

message Command {
  // The argument vector.  argv[0] is the binary.
  repeated string argv = 1;
  // A map of environment variables.
  // This is part of the command rather than Platform, because it does not
  // affect scheduling.
  message EnvironmentEntry {
    string variable = 1;
    string value = 2;
  }
  // Environment variables, sorted by variable.
  repeated EnvironmentEntry environment = 2;
  // Possibly other fields that describe the setup actions of the command.
}
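
// Illustrative Command in proto text format (hypothetical values), with the
// environment entries sorted by variable name as required above:
//   argv: "/usr/bin/gcc"
//   argv: "-c"
//   argv: "foo.cc"
//   environment { variable: "LANG" value: "en_US.UTF-8" }
//   environment { variable: "PATH" value: "/bin:/usr/bin" }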

message Output {
  string path = 1;
  // Yes, we shouldn't have to repeat paths here, but it should not be too
  // costly, and might help.
  // The actual output bodies will be stored in CAS.
  oneof content {
    // Populated for directory outputs only, contains the root FileNode digest.
    ContentDigest digest = 2;
    // Populated for file outputs only.
    FileMetadata file_metadata = 3;
  }
}

message ActionResult {
  // Relative paths to the action's output files and directories.
  // Any unnamed output files will be discarded.
  // A path will be interpreted as a directory iff it has a trailing slash.
  // This list should be sorted by path.
  repeated Output output = 1;
  int32 return_code = 2;
  ContentDigest stdout_digest = 3;
  ContentDigest stderr_digest = 4;
}
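
// Illustrative ActionResult in proto text format (hypothetical values). The
// file output carries FileMetadata, while the directory output (trailing
// slash) carries the digest of its root FileNode:
//   output {
//     path: "bazel-out/foo.o"
//     file_metadata { digest { digest: "<sha1>" size_bytes: 2048 } executable: false }
//   }
//   output { path: "logs/" digest { digest: "<sha1 of the root FileNode>" size_bytes: 77 } }
//   return_code: 0
//   stdout_digest { digest: "<sha1>" size_bytes: 0 }
//   stderr_digest { digest: "<sha1>" size_bytes: 0 }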

// Status message shared by all CAS requests.
message CasStatus {
  // Whether the requested action succeeded, i.e. completed without
  // server-side errors.
  bool succeeded = 1;
  enum ErrorCode {
    UNKNOWN = 0;
    INVALID_ARGUMENT = 1; // The client behaved incorrectly. error_detail should
                          // have more information.
    MISSING_DIGEST = 2;  // Missing a node on tree download.
    DIGEST_MISMATCH = 3;  // Upload-only error: the requested digest does not
                          // match the server-side computed one.
    NODE_PARSE_ERROR = 4;  // Failed to parse digested data as a node.
    // more errors...
  }
  ErrorCode error = 2;
  string error_detail = 3;  // Human readable error.
  // These are a common part of the status for many CAS requests:
  repeated ContentDigest missing_digest = 4;
  repeated ContentDigest parse_failed_digest = 5;  // Only relevant to trees.
}

service CasService {
  // Looks up the given content keys in CAS, and returns success when all of
  // them are found. The single returned status lists the potentially missing
  // digests, which need to be re-uploaded.
  rpc Lookup(CasLookupRequest) returns (CasLookupReply) { }
  // Uploads a directory tree into CAS. Not streamed, because it is only tree
  // metadata.
  rpc UploadTreeMetadata(CasUploadTreeMetadataRequest) returns
      (CasUploadTreeMetadataReply) { }
  // Uploads data blob(s) into CAS.
  rpc UploadBlob(stream CasUploadBlobRequest) returns (CasUploadBlobReply) { }
  // Downloads directory tree metadata from CAS.
  rpc DownloadTreeMetadata(CasDownloadTreeMetadataRequest) returns
      (CasDownloadTreeMetadataReply) { }
  // Downloads a directory tree from CAS. Returns the entire root directory.
  rpc DownloadTree(CasDownloadTreeRequest) returns (stream CasDownloadReply) { }
  // Downloads data blob(s) from CAS and returns them.
  rpc DownloadBlob(CasDownloadBlobRequest) returns (stream CasDownloadReply) { }
}
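
// A plausible client-side upload sequence built on this service (a sketch,
// not a mandated flow): call Lookup with the digests of all inputs; the
// returned CasStatus lists the missing ones. Then call UploadTreeMetadata for
// the missing FileNodes and stream UploadBlob for the missing file contents
// before requesting execution.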

message FileNode {
  FileMetadata file_metadata = 1;
  message Child {
    string path = 1;
    ContentDigest digest = 2;
  }
  // The children should be sorted by path, and not have equal subdirectory
  // prefixes.
  repeated Child child = 2;
}
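
// Illustrative sketch of a small input tree encoded as FileNodes, under the
// assumption that each Child.digest refers to the child's own FileNode stored
// in CAS (leaf FileNodes carry file_metadata and no children). Paths and
// digests are hypothetical:
//   root FileNode:
//     child { path: "pkg"      digest { digest: "<sha1 of the pkg FileNode>" } }
//     child { path: "tools.sh" digest { digest: "<sha1 of the tools.sh FileNode>" } }
//   tools.sh FileNode:
//     file_metadata { digest { digest: "<sha1 of the file contents>" size_bytes: 512 }
//                     executable: true }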

message CasLookupRequest {
  repeated ContentDigest digest = 1;
}

message CasLookupReply {
  CasStatus status = 1;
}

message CasUploadTreeMetadataRequest {
  repeated FileNode tree_node = 1;
}

message CasUploadTreeMetadataReply {
  CasStatus status = 1;
}

message CasDownloadTreeMetadataRequest {
  ContentDigest root = 1;
}

message CasDownloadTreeMetadataReply {
  CasStatus status = 1;
  repeated FileNode tree_node = 2;
}

message BlobChunk {
  ContentDigest digest = 1;  // Present only in first chunk.
  int64 offset = 2;
  bytes data = 3;
}
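
// Illustrative chunking of a single 3 MiB blob for a streamed upload, assuming
// a 1 MiB chunk size (the protocol does not fix one). Only the first chunk
// carries the digest; later chunks carry just offset and data:
//   chunk 1: digest { digest: "<sha1>" size_bytes: 3145728 } offset: 0 data: <1 MiB>
//   chunk 2: offset: 1048576 data: <1 MiB>
//   chunk 3: offset: 2097152 data: <1 MiB>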

// This will be used for batching files/blobs.
message CasUploadBlobRequest {
  BlobChunk data = 1;
}

message CasUploadBlobReply {
  CasStatus status = 1;
}

message CasDownloadTreeRequest {
  ContentDigest root_digest = 1;
}

// This message is streamed.
message CasDownloadReply {
  CasStatus status = 1;
  BlobChunk data = 2;
  // For trees, data is the entire root directory, zipped (for a single root).
  // For blobs, sequential chunks of multiple blobs.
}
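
// Illustrative reply stream for a CasDownloadBlobRequest carrying two digests
// (a sketch; exact chunk boundaries are server-defined). A chunk that carries
// a digest is assumed to start a new blob:
//   reply 1: data { digest { digest: "<blob A sha1>" } offset: 0 data: <first chunk of A> }
//   reply 2: data { offset: 1048576 data: <rest of A> }
//   reply 3: data { digest { digest: "<blob B sha1>" } offset: 0 data: <all of B> }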

message CasDownloadBlobRequest {
  repeated ContentDigest digest = 1;
}

service ExecutionCacheService {
  // Gets results of a cached action.
  rpc GetCachedResult(ExecutionCacheRequest) returns (ExecutionCacheReply) { }
  // Sets results of a cached action. This requires reproducible builds on
  // connected machines!
  rpc SetCachedResult(ExecutionCacheSetRequest) returns
      (ExecutionCacheSetReply) {}
}
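
// A plausible caching flow on top of this service (a sketch): the client
// computes the ContentDigest of the serialized Action proto, calls
// GetCachedResult with it, and on MISSING_RESULT runs the action (for example
// via ExecuteService) and publishes the outcome with SetCachedResult under the
// same action digest.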

message ExecutionCacheRequest {
  ContentDigest action_digest = 1;
}

message ExecutionCacheStatus {
  // Whether the request succeeded, i.e. had no server-side errors. For a
  // lookup (get result) request, a true value means the result was found in
  // the cache.
  bool succeeded = 1;
  enum ErrorCode {
    UNKNOWN = 0;
    MISSING_RESULT = 1;  // The result was not found in the execution cache.
    UNSUPPORTED = 2;  // The request is not supported by the server.
    // More server errors...
  }
  ErrorCode error = 2;
  string error_detail = 3;  // Human readable error.
}

message ExecutionCacheReply {
  ExecutionCacheStatus status = 1;
  ActionResult result = 2;
}

message ExecutionCacheSetRequest {
  ContentDigest action_digest = 1;
  ActionResult result = 2;
}

message ExecutionCacheSetReply {
  ExecutionCacheStatus status = 1;
}

service ExecuteService {
  // Executes an action remotely.
  rpc Execute(ExecuteRequest) returns (stream ExecuteReply) { }
}

message ExecuteRequest {
  Action action = 1;
  bool accept_cached = 2;
  // Later will probably add previous attempt history, as it will be
  // useful for monitoring and probably scheduling as well.
  // The following fields will be useful for scheduling, error reporting
  // (e.g. disk exceeded) and for log analysis.
  int32 total_input_file_count = 3;
  int64 total_input_file_bytes = 4;
  // Used for monitoring and scheduling.
  BuildInfo build_info = 5;
  // Timeout in milliseconds for running this action. Maybe add io_timeout as
  // well, per Marc Antoine's suggestion.
  int64 timeout_millis = 6;
  // Add other fields such as required cores, RAM, etc, that affect scheduling,
  // but not result, later. All the requirements that DO affect results should
  // be part of the Action.
}
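
// Illustrative ExecuteRequest in proto text format (hypothetical numbers;
// nested Action fields elided):
//   action { command_digest { ... } input_root_digest { ... } output_path: "bazel-out/foo.o" }
//   accept_cached: true
//   total_input_file_count: 37
//   total_input_file_bytes: 1048576
//   build_info { build_id: "a1b2c3" }
//   timeout_millis: 60000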

message BuildInfo {
  string build_id = 1;
  // TBD: fields used to identify a build action.
  // This will be useful for analysis purposes.
  // Note: we don't want to put any Bazel-specific fields in this.
}

message ExecutionStats {
  // TBD, will contain all the stats related to the execution:
  // time it took, resources, etc. Maybe will break into sub-messages
  // for various execution phases.
}

message ExecuteReply {
  ExecutionStatus status = 1;
  ActionResult result = 2;
  bool cached_result = 3;  // Filled by the server on Execution Cache hit.
  ExecutionStats execution_stats = 4;
  // A possible server-side CAS error, e.g. missing inputs. The message
  // contains the missing digests.
  CasStatus cas_error = 5;
  // Later will introduce return AttemptHistory for monitoring and use
  // in requests.
}

message ExecutionStatus {
  bool executed = 1;  // Whether the action was executed.
  bool succeeded = 2;  // Whether the action succeeded.
  enum ErrorCode {
    UNKNOWN_ERROR = 0;
    MISSING_COMMAND = 1;  // Missing command digest in CAS.
    MISSING_INPUT = 2;  // Missing one of the inputs in CAS.
    DEADLINE_EXCEEDED = 3;
    EXEC_FAILED = 4;  // Action returned non-zero.
    // Other server errors. Some of these errors are client-retriable, and some
    // not; will have to comment clearly what will happen on each error.
  }
  ErrorCode error = 3;
  string error_detail = 4;
  // These fields allow returning streaming statuses for the action progress.
  enum ActionStage {
    UNKNOWN_STAGE = 0;
    QUEUED = 1;
    EXECUTING = 2;
    FINISHED = 3;
  }
  ActionStage stage = 5;
  // Optionally will add more details pertaining to the current stage, for
  // example time spent executing, or position in queue, etc.
}
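
// Illustrative sequence of streamed ExecuteReply messages for one action
// (a sketch of how the stages above could be reported; the exact cadence is
// server-defined):
//   reply 1: status { executed: false succeeded: false stage: QUEUED }
//   reply 2: status { executed: false succeeded: false stage: EXECUTING }
//   reply 3: status { executed: true succeeded: true stage: FINISHED }
//            result { ... } execution_stats { ... }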