// Copyright 2015 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package build.remote;

option java_package = "com.google.devtools.build.lib.remote";

message ContentDigest {
  bytes digest = 1;  // A hash of the contents of the file, blob, or tree node
                     // (see below). The contents are digested using SHA1
                     // (20 bytes).
  int64 size_bytes = 2;
  int32 version = 3;  // Will be 0 by default, and increase if we ever change
                      // the hash function used, etc.
  // And maybe other metadata later.
}

message FileMetadata {
  ContentDigest digest = 1;
  bool executable = 3;
}

message Platform {
  // The contents of this message will be determined later, with the purpose of
  // specifying everything that is required for the action execution, but
  // cannot be uploaded as part of the inputs.
  // It will likely contain fields such as architecture, vendor, OS + version,
  // ABI, etc.
}

// All the fields of this message have the following in common: they are
// expected to define a result. All instructions to remote execution that do
// not affect the result should be kept out of this message, because this
// message will be used as the Execution Cache key.
message Action {
  // The digest of the command (as an encoded proto blob).
  // Digested separately, because it is both long and reusable.
  ContentDigest command_digest = 1;
  // The Merkle digest of the root input directory.
  ContentDigest input_root_digest = 2;
  // Relative paths to the action's output files and directories.
  // The standard output and error stream contents are always returned
  // in addition (see the ActionResult message).
  // Any unnamed output files will be discarded.
  // A path will be interpreted as a directory iff it contains a trailing
  // slash.
  // This should be sorted!
  repeated string output_path = 4;
  Platform platform = 5;
  // All other requirements that affect the result of the action should be
  // here as well.
}

message Command {
  // The argument vector. argv[0] is the binary.
  repeated string argv = 1;
  // A map of environment variables.
  // This is part of the Command rather than the Platform, because it does not
  // affect scheduling.
  message EnvironmentEntry {
    string variable = 1;
    string value = 2;
  }
  // Environment variables, sorted by variable name.
  repeated EnvironmentEntry environment = 2;
  // Possibly other fields that describe the setup actions of the command.
}

message Output {
  string path = 1;  // Yes, we shouldn't have to repeat paths here, but it
                    // should not be too costly, and might help.
  // The actual output bodies will be stored in the CAS.
  oneof content {
    // Populated for directory outputs only; contains the root FileNode digest.
    ContentDigest digest = 2;
    // Populated for file outputs only.
    FileMetadata file_metadata = 3;
  }
}
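// Illustrative sketch only, not part of the protocol: one way a Java client
// might construct a ContentDigest for a blob, assuming the standard
// protobuf-generated classes for these messages (the exact class nesting
// depends on java_outer_classname / java_multiple_files, which this file does
// not set) and java.security.MessageDigest / com.google.protobuf.ByteString:
//
//   byte[] blob = Files.readAllBytes(Paths.get("some/input/file"));  // hypothetical path
//   byte[] hash = MessageDigest.getInstance("SHA-1").digest(blob);
//   ContentDigest digest = ContentDigest.newBuilder()
//       .setDigest(ByteString.copyFrom(hash))
//       .setSizeBytes(blob.length)
//       .build();
//
// The same construction would apply to a serialized Command proto (yielding
// Action.command_digest) and to serialized FileNode tree metadata (see the CAS
// messages below).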
message ActionResult {
  // Relative paths to the action's output files and directories.
  // Any unnamed output files will be discarded.
  // A path will be interpreted as a directory iff it contains a trailing
  // slash.
  // This should be sorted by path.
  repeated Output output = 1;
  int32 return_code = 2;
  ContentDigest stdout_digest = 3;
  ContentDigest stderr_digest = 4;
}

// Status message shared by all CAS requests.
message CasStatus {
  bool succeeded = 1;  // Whether the requested action succeeded, i.e. had no
                       // server side errors.
  enum ErrorCode {
    UNKNOWN = 0;
    INVALID_ARGUMENT = 1;  // The client behaved incorrectly. error_detail
                           // should have more information.
    MISSING_DIGEST = 2;    // Missing a node on tree download.
    DIGEST_MISMATCH = 3;   // Upload only error, when the requested digest does
                           // not match the server side computed one.
    NODE_PARSE_ERROR = 4;  // Failed to parse digested data as a node.
    // more errors...
  }
  ErrorCode error = 2;
  string error_detail = 3;  // Human readable error.
  // These are a common part of the status for many CAS requests:
  repeated ContentDigest missing_digest = 4;
  repeated ContentDigest parse_failed_digest = 5;  // Only relevant to trees.
}

service CasService {
  // Looks up the given content keys in the CAS, and returns success when
  // found. The single returned status will contain the potentially missing
  // digests, which need to be re-uploaded.
  rpc Lookup(CasLookupRequest) returns (CasLookupReply) { }
  // Uploads a directory tree into the CAS. Not streamed, because it is only
  // tree metadata.
  rpc UploadTreeMetadata(CasUploadTreeMetadataRequest)
      returns (CasUploadTreeMetadataReply) { }
  // Uploads data blob(s) into the CAS.
  rpc UploadBlob(stream CasUploadBlobRequest) returns (CasUploadBlobReply) { }
  // Downloads directory tree metadata from the CAS.
  rpc DownloadTreeMetadata(CasDownloadTreeMetadataRequest)
      returns (CasDownloadTreeMetadataReply) { }
  // Downloads a directory tree from the CAS. Returns the entire root
  // directory.
  rpc DownloadTree(CasDownloadTreeRequest) returns (stream CasDownloadReply) { }
  // Downloads data blob(s) from the CAS and returns them.
  rpc DownloadBlob(CasDownloadBlobRequest) returns (stream CasDownloadReply) { }
}

message FileNode {
  FileMetadata file_metadata = 1;
  message Child {
    string path = 1;
    ContentDigest digest = 2;
  }
  // The children should be sorted by path, and not have equal subdirectory
  // prefixes.
  repeated Child child = 2;
}

message CasLookupRequest {
  repeated ContentDigest digest = 1;
}

message CasLookupReply {
  CasStatus status = 1;
}

message CasUploadTreeMetadataRequest {
  repeated FileNode tree_node = 1;
}

message CasUploadTreeMetadataReply {
  CasStatus status = 1;
}

message CasDownloadTreeMetadataRequest {
  ContentDigest root = 1;
}

message CasDownloadTreeMetadataReply {
  CasStatus status = 1;
  repeated FileNode tree_node = 2;
}

message BlobChunk {
  ContentDigest digest = 1;  // Present only in the first chunk.
  int64 offset = 2;
  bytes data = 3;
}

// This will be used for batching files/blobs.
message CasUploadBlobRequest {
  BlobChunk data = 1;
}

message CasUploadBlobReply {
  CasStatus status = 1;
}

message CasDownloadTreeRequest {
  ContentDigest root_digest = 1;
}

// This message is streamed.
message CasDownloadReply {
  CasStatus status = 1;
  BlobChunk data = 2;
  // For trees, data is the entire root directory, zipped (for a single root).
  // For blobs, sequential chunks of multiple blobs.
}

message CasDownloadBlobRequest {
  repeated ContentDigest digest = 1;
}
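// Illustrative client sketch only, not part of the protocol: the intended CAS
// round trip is to Lookup digests first and then upload whatever the server
// reports as missing. The snippet assumes a gRPC Java stub generated from this
// file; the class and method names follow the usual gRPC/protobuf conventions
// and are not guaranteed by this file:
//
//   CasServiceGrpc.CasServiceBlockingStub cas =
//       CasServiceGrpc.newBlockingStub(channel);
//   CasLookupReply reply = cas.lookup(
//       CasLookupRequest.newBuilder()
//           .addDigest(commandDigest)
//           .addDigest(inputRootDigest)
//           .build());
//   for (ContentDigest missing : reply.getStatus().getMissingDigestList()) {
//     // Upload missing blobs via UploadBlob (a stream of BlobChunk messages)
//     // and missing tree metadata via UploadTreeMetadata before executing.
//   }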
service ExecutionCacheService {
  // Gets the results of a cached action.
  rpc GetCachedResult(ExecutionCacheRequest) returns (ExecutionCacheReply) { }
  // Sets the results of a cached action. This requires reproducible builds on
  // the connected machines!
  rpc SetCachedResult(ExecutionCacheSetRequest)
      returns (ExecutionCacheSetReply) { }
}

message ExecutionCacheRequest {
  ContentDigest action_digest = 1;
}

message ExecutionCacheStatus {
  // Whether the request succeeded, without any server side errors. For a
  // lookup (get result) request, a true value means the result was found in
  // the cache.
  bool succeeded = 1;
  enum ErrorCode {
    UNKNOWN = 0;
    MISSING_RESULT = 1;  // The result was not found in the execution cache.
    UNSUPPORTED = 2;     // The request is not supported by the server.
    // More server errors...
  }
  ErrorCode error = 2;
  string error_detail = 3;  // Human readable error.
}

message ExecutionCacheReply {
  ExecutionCacheStatus status = 1;
  ActionResult result = 2;
}

message ExecutionCacheSetRequest {
  ContentDigest action_digest = 1;
  ActionResult result = 2;
}

message ExecutionCacheSetReply {
  ExecutionCacheStatus status = 1;
}

service ExecuteService {
  // Executes an action remotely.
  rpc Execute(ExecuteRequest) returns (stream ExecuteReply) { }
}

message ExecuteRequest {
  Action action = 1;
  bool accept_cached = 2;
  // Later we will probably add previous attempt history, as it will be
  // useful for monitoring and probably scheduling as well.
  // These fields will be useful for scheduling, error reporting (e.g. disk
  // space exceeded), and log analysis.
  int32 total_input_file_count = 3;
  int64 total_input_file_bytes = 4;
  // Used for monitoring and scheduling.
  BuildInfo build_info = 5;
  // Timeout in milliseconds for running this action.
  int64 timeout_millis = 6;  // Maybe add an io_timeout as well, per
                             // Marc Antoine's suggestion.
  // Add other fields such as required cores, RAM, etc., that affect scheduling
  // but not the result, later. All the requirements that DO affect results
  // should be part of the Action.
}

message BuildInfo {
  string build_id = 1;  // TBD, fields used to identify a build action.
                        // This will be useful for analysis purposes.
  // Note: we don't want to put any Bazel-specific fields in this.
}

message ExecutionStats {
  // TBD, will contain all the stats related to the execution: the time it
  // took, resources used, etc. Maybe we will break it into sub-messages for
  // the various execution phases.
}

message ExecuteReply {
  ExecutionStatus status = 1;
  ActionResult result = 2;
  bool cached_result = 3;  // Filled in by the server on an Execution Cache
                           // hit.
  ExecutionStats execution_stats = 4;
  CasStatus cas_error = 5;  // A possible server-side CAS error, e.g. missing
                            // inputs. The message contains the missing
                            // digests.
  // Later we will introduce a returned AttemptHistory for monitoring and for
  // use in requests.
}

message ExecutionStatus {
  bool executed = 1;   // Whether the action was executed.
  bool succeeded = 2;  // Whether the action succeeded.
  enum ErrorCode {
    UNKNOWN_ERROR = 0;
    MISSING_COMMAND = 1;  // Missing command digest in the CAS.
    MISSING_INPUT = 2;    // Missing one of the inputs in the CAS.
    DEADLINE_EXCEEDED = 3;
    EXEC_FAILED = 4;  // The action returned a non-zero exit code.
    // Other server errors. Some of these errors are client-retriable, and
    // some are not; we will have to document clearly what happens on each
    // error.
  }
  ErrorCode error = 3;
  string error_detail = 4;
  // These fields allow returning streaming statuses for the action progress.
  enum ActionStage {
    UNKNOWN_STAGE = 0;
    QUEUED = 1;
    EXECUTING = 2;
    FINISHED = 3;
  }
  ActionStage stage = 5;
  // Optionally we will add more details pertaining to the current stage, for
  // example time spent executing, or position in the queue, etc.
}
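// Illustrative client sketch only, not part of the protocol: executing an
// action and consuming the streamed replies, under the same assumptions about
// generated class and method names as the CAS sketch above:
//
//   ExecuteServiceGrpc.ExecuteServiceBlockingStub executor =
//       ExecuteServiceGrpc.newBlockingStub(channel);
//   ExecuteRequest request = ExecuteRequest.newBuilder()
//       .setAction(action)
//       .setAcceptCached(true)
//       .setTimeoutMillis(60000)
//       .build();
//   Iterator<ExecuteReply> replies = executor.execute(request);
//   while (replies.hasNext()) {
//     ExecuteReply reply = replies.next();
//     if (reply.getStatus().getStage() == ExecutionStatus.ActionStage.FINISHED) {
//       // reply.getResult() holds the ActionResult; output bodies are then
//       // fetched from the CAS via DownloadBlob / DownloadTree using the
//       // digests it contains.
//     }
//   }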