xref: /aosp_15_r20/external/bazelbuild-remote-apis/build/bazel/remote/execution/v2/remote_execution.proto (revision ae21b2b400d1606a797985382019aea74177085c)
1// Copyright 2018 The Bazel Authors.
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7//     http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15syntax = "proto3";
16
17package build.bazel.remote.execution.v2;
18
19import "build/bazel/semver/semver.proto";
20import "google/api/annotations.proto";
21import "google/longrunning/operations.proto";
22import "google/protobuf/any.proto";
23import "google/protobuf/duration.proto";
24import "google/protobuf/timestamp.proto";
25import "google/protobuf/wrappers.proto";
26import "google/rpc/status.proto";
27
28option csharp_namespace = "Build.Bazel.Remote.Execution.V2";
29option go_package = "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2;remoteexecution";
30option java_multiple_files = true;
31option java_outer_classname = "RemoteExecutionProto";
32option java_package = "build.bazel.remote.execution.v2";
33option objc_class_prefix = "REX";
34
35
// The Remote Execution API is used to execute an
// [Action][build.bazel.remote.execution.v2.Action] on the remote
// workers.
//
// As with other services in the Remote Execution API, any call may return an
// error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
// information about when the client should retry the request; clients SHOULD
// respect the information provided.
service Execution {
  // Execute an action remotely.
  //
  // In order to execute an action, the client must first upload all of the
  // inputs, the
  // [Command][build.bazel.remote.execution.v2.Command] to run, and the
  // [Action][build.bazel.remote.execution.v2.Action] into the
  // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
  // It then calls `Execute` with an `action_digest` referring to them. The
  // server will run the action and eventually return the result.
  //
  // The input `Action`'s fields MUST meet the various canonicalization
  // requirements specified in the documentation for their types so that it has
  // the same digest as other logically equivalent `Action`s. The server MAY
  // enforce the requirements and return errors if a non-canonical input is
  // received. It MAY also proceed without verifying some or all of the
  // requirements, such as for performance reasons. If the server does not
  // verify the requirement, then it will treat the `Action` as distinct from
  // another logically equivalent action if they hash differently.
  //
  // Returns a stream of
  // [google.longrunning.Operation][google.longrunning.Operation] messages
  // describing the resulting execution, with eventual `response`
  // [ExecuteResponse][build.bazel.remote.execution.v2.ExecuteResponse]. The
  // `metadata` on the operation is of type
  // [ExecuteOperationMetadata][build.bazel.remote.execution.v2.ExecuteOperationMetadata].
  //
  // If the client remains connected after the first response is returned by
  // the server, then updates are streamed as if the client had called
  // [WaitExecution][build.bazel.remote.execution.v2.Execution.WaitExecution]
  // until the execution completes or the request reaches an error. The
  // operation can also be queried using [Operations
  // API][google.longrunning.Operations.GetOperation].
  //
  // The server NEED NOT implement other methods or functionality of the
  // Operations API.
  //
  // Errors discovered during creation of the `Operation` will be reported
  // as gRPC Status errors, while errors that occurred while running the
  // action will be reported in the `status` field of the `ExecuteResponse`. The
  // server MUST NOT set the `error` field of the `Operation` proto.
  // The possible errors include:
  //
  // * `INVALID_ARGUMENT`: One or more arguments are invalid.
  // * `FAILED_PRECONDITION`: One or more errors occurred in setting up the
  //   action requested, such as a missing input or command or no worker being
  //   available. The client may be able to fix the errors and retry.
  // * `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to run
  //   the action.
  // * `UNAVAILABLE`: Due to a transient condition, such as all workers being
  //   occupied (and the server does not support a queue), the action could not
  //   be started. The client should retry.
  // * `INTERNAL`: An internal error occurred in the execution engine or the
  //   worker.
  // * `DEADLINE_EXCEEDED`: The execution timed out.
  // * `CANCELLED`: The operation was cancelled by the client. This status is
  //   only possible if the server implements the Operations API CancelOperation
  //   method, and it was called for the current execution.
  //
  // In the case of a missing input or command, the server SHOULD additionally
  // send a [PreconditionFailure][google.rpc.PreconditionFailure] error detail
  // where, for each requested blob not present in the CAS, there is a
  // `Violation` with a `type` of `MISSING` and a `subject` of
  // `"blobs/{digest_function/}{hash}/{size}"` indicating the digest of the
  // missing blob. The `subject` is formatted the same way as the
  // `resource_name` provided to
  // [ByteStream.Read][google.bytestream.ByteStream.Read], with the leading
  // instance name omitted. `digest_function` MUST thus be omitted if its value
  // is one of MD5, MURMUR3, SHA1, SHA256, SHA384, SHA512, or VSO.
  //
  // The server does not need to guarantee that a call to this method leads to
  // at most one execution of the action. The server MAY execute the action
  // multiple times, potentially in parallel. These redundant executions MAY
  // continue to run, even if the operation is completed.
  rpc Execute(ExecuteRequest) returns (stream google.longrunning.Operation) {
    option (google.api.http) = { post: "/v2/{instance_name=**}/actions:execute" body: "*" };
  }

  // Wait for an execution operation to complete. When the client initially
  // makes the request, the server immediately responds with the current status
  // of the execution. The server will leave the request stream open until the
  // operation completes, and then respond with the completed operation. The
  // server MAY choose to stream additional updates as execution progresses,
  // such as to provide an update as to the state of the execution.
  //
  // In addition to the cases described for Execute, the WaitExecution method
  // may fail as follows:
  //
  // * `NOT_FOUND`: The operation no longer exists due to any of a transient
  //   condition, an unknown operation name, or if the server implements the
  //   Operations API DeleteOperation method and it was called for the current
  //   execution. The client should call `Execute` to retry.
  rpc WaitExecution(WaitExecutionRequest) returns (stream google.longrunning.Operation) {
    option (google.api.http) = { post: "/v2/{name=operations/**}:waitExecution" body: "*" };
  }
}
140
// The action cache API is used to query whether a given action has already been
// performed and, if so, retrieve its result. Unlike the
// [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage],
// which addresses blobs by their own content, the action cache addresses the
// [ActionResult][build.bazel.remote.execution.v2.ActionResult] by a
// digest of the encoded [Action][build.bazel.remote.execution.v2.Action]
// which produced them.
//
// The lifetime of entries in the action cache is implementation-specific, but
// the server SHOULD assume that more recently used entries are more likely to
// be used again.
//
// As with other services in the Remote Execution API, any call may return an
// error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
// information about when the client should retry the request; clients SHOULD
// respect the information provided.
service ActionCache {
  // Retrieve a cached execution result.
  //
  // Implementations SHOULD ensure that any blobs referenced from the
  // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]
  // are available at the time of returning the
  // [ActionResult][build.bazel.remote.execution.v2.ActionResult] and will be
  // for some period of time afterwards. The lifetimes of the referenced blobs SHOULD be increased
  // if necessary and applicable.
  //
  // Errors:
  //
  // * `NOT_FOUND`: The requested `ActionResult` is not in the cache.
  rpc GetActionResult(GetActionResultRequest) returns (ActionResult) {
    option (google.api.http) = { get: "/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}" };
  }

  // Upload a new execution result.
  //
  // In order to allow the server to perform access control based on the type of
  // action, and to assist with client debugging, the client MUST first upload
  // the [Action][build.bazel.remote.execution.v2.Action] that produced the
  // result, along with its
  // [Command][build.bazel.remote.execution.v2.Command], into the
  // `ContentAddressableStorage`.
  //
  // Server implementations MAY modify the
  // `UpdateActionResultRequest.action_result` and return an equivalent value.
  //
  // Errors:
  //
  // * `INVALID_ARGUMENT`: One or more arguments are invalid.
  // * `FAILED_PRECONDITION`: One or more errors occurred in updating the
  //   action result, such as a missing command or action.
  // * `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the
  //   entry to the cache.
  rpc UpdateActionResult(UpdateActionResultRequest) returns (ActionResult) {
    option (google.api.http) = { put: "/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}" body: "action_result" };
  }
}
197
// The CAS (content-addressable storage) is used to store the inputs to and
// outputs from the execution service. Each piece of content is addressed by the
// digest of its binary data.
//
// Most of the binary data stored in the CAS is opaque to the execution engine,
// and is only used as a communication medium. In order to build an
// [Action][build.bazel.remote.execution.v2.Action],
// however, the client will need to also upload the
// [Command][build.bazel.remote.execution.v2.Command] and input root
// [Directory][build.bazel.remote.execution.v2.Directory] for the Action.
// The Command and Directory messages must be marshalled to wire format and then
// uploaded under the hash as with any other piece of content. In practice, the
// input root directory is likely to refer to other Directories in its
// hierarchy, which must also each be uploaded on their own.
//
// For small file uploads the client should group them together and call
// [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs].
//
// For large uploads, the client must use the
// [Write method][google.bytestream.ByteStream.Write] of the ByteStream API.
//
// For uncompressed data, the `WriteRequest.resource_name` is of the following form:
// `{instance_name}/uploads/{uuid}/blobs/{digest_function/}{hash}/{size}{/optional_metadata}`
//
// Where:
// * `instance_name` is an identifier used to distinguish between the various
//   instances on the server. Syntax and semantics of this field are defined
//   by the server; Clients must not make any assumptions about it (e.g.,
//   whether it spans multiple path segments or not). If it is the empty path,
//   the leading slash is omitted, so that the `resource_name` becomes
//   `uploads/{uuid}/blobs/{digest_function/}{hash}/{size}{/optional_metadata}`.
//   To simplify parsing, a path segment cannot equal any of the following
//   keywords: `blobs`, `uploads`, `actions`, `actionResults`, `operations`,
//   `capabilities` or `compressed-blobs`.
// * `uuid` is a version 4 UUID generated by the client, used to avoid
//   collisions between concurrent uploads of the same data. Clients MAY
//   reuse the same `uuid` for uploading different blobs.
// * `digest_function` is a lowercase string form of a `DigestFunction.Value`
//   enum, indicating which digest function was used to compute `hash`. If the
//   digest function used is one of MD5, MURMUR3, SHA1, SHA256, SHA384, SHA512,
//   or VSO, this component MUST be omitted. In that case the server SHOULD
//   infer the digest function using the length of the `hash` and the digest
//   functions announced in the server's capabilities.
// * `hash` and `size` refer to the [Digest][build.bazel.remote.execution.v2.Digest]
//   of the data being uploaded.
// * `optional_metadata` is implementation specific data, which clients MAY omit.
//   Servers MAY ignore this metadata.
//
// Data can alternatively be uploaded in compressed form, with the following
// `WriteRequest.resource_name` form:
// `{instance_name}/uploads/{uuid}/compressed-blobs/{compressor}/{digest_function/}{uncompressed_hash}/{uncompressed_size}{/optional_metadata}`
//
// Where:
// * `instance_name`, `uuid`, `digest_function` and `optional_metadata` are
//   defined as above.
// * `compressor` is a lowercase string form of a `Compressor.Value` enum
//   other than `identity`, which is supported by the server and advertised in
//   [CacheCapabilities.supported_compressor][build.bazel.remote.execution.v2.CacheCapabilities.supported_compressor].
// * `uncompressed_hash` and `uncompressed_size` refer to the
//   [Digest][build.bazel.remote.execution.v2.Digest] of the data being
//   uploaded, once uncompressed. Servers MUST verify that these match
//   the uploaded data once uncompressed, and MUST return an
//   `INVALID_ARGUMENT` error in the case of mismatch.
//
// Note that when writing compressed blobs, the `WriteRequest.write_offset` in
// the initial request in a stream refers to the offset in the uncompressed form
// of the blob. In subsequent requests, `WriteRequest.write_offset` MUST be the
// sum of the first request's 'WriteRequest.write_offset' and the total size of
// all the compressed data bundles in the previous requests.
// Note that this mixes an uncompressed offset with a compressed byte length,
// which is nonsensical, but it is done to fit the semantics of the existing
// ByteStream protocol.
//
// Uploads of the same data MAY occur concurrently in any form, compressed or
// uncompressed.
//
// Clients SHOULD NOT use gRPC-level compression for ByteStream API `Write`
// calls of compressed blobs, since this would compress already-compressed data.
//
// When attempting an upload, if another client has already completed the upload
// (which may occur in the middle of a single upload if another client uploads
// the same blob concurrently), the request will terminate immediately without
// error, and with a response whose `committed_size` is the value `-1` if this
// is a compressed upload, or with the full size of the uploaded file if this is
// an uncompressed upload (regardless of how much data was transmitted by the
// client). If the client completes the upload but the
// [Digest][build.bazel.remote.execution.v2.Digest] does not match, an
// `INVALID_ARGUMENT` error will be returned. In either case, the client should
// not attempt to retry the upload.
//
// Small downloads can be grouped and requested in a batch via
// [BatchReadBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchReadBlobs].
//
// For large downloads, the client must use the
// [Read method][google.bytestream.ByteStream.Read] of the ByteStream API.
//
// For uncompressed data, the `ReadRequest.resource_name` is of the following form:
// `{instance_name}/blobs/{digest_function/}{hash}/{size}`
// Where `instance_name`, `digest_function`, `hash` and `size` are defined as
// for uploads.
//
// Data can alternatively be downloaded in compressed form, with the following
// `ReadRequest.resource_name` form:
// `{instance_name}/compressed-blobs/{compressor}/{digest_function/}{uncompressed_hash}/{uncompressed_size}`
//
// Where:
// * `instance_name`, `compressor` and `digest_function` are defined as for
//   uploads.
// * `uncompressed_hash` and `uncompressed_size` refer to the
//   [Digest][build.bazel.remote.execution.v2.Digest] of the data being
//   downloaded, once uncompressed. Clients MUST verify that these match
//   the downloaded data once uncompressed, and take appropriate steps in
//   the case of failure such as retrying a limited number of times or
//   surfacing an error to the user.
//
// When downloading compressed blobs:
// * `ReadRequest.read_offset` refers to the offset in the uncompressed form
//   of the blob.
// * Servers MUST return `INVALID_ARGUMENT` if `ReadRequest.read_limit` is
//   non-zero.
// * Servers MAY use any compression level they choose, including different
//   levels for different blobs (e.g. choosing a level designed for maximum
//   speed for data known to be incompressible).
// * Clients SHOULD NOT use gRPC-level compression, since this would compress
//   already-compressed data.
//
// Servers MUST be able to provide data for all recently advertised blobs in
// each of the compression formats that the server supports, as well as in
// uncompressed form.
//
// The lifetime of entries in the CAS is implementation specific, but it SHOULD
// be long enough to allow for newly-added and recently looked-up entries to be
// used in subsequent calls (e.g. to
// [Execute][build.bazel.remote.execution.v2.Execution.Execute]).
//
// Servers MUST behave as though empty blobs are always available, even if they
// have not been uploaded. Clients MAY optimize away the uploading or
// downloading of empty blobs.
//
// As with other services in the Remote Execution API, any call may return an
// error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
// information about when the client should retry the request; clients SHOULD
// respect the information provided.
service ContentAddressableStorage {
  // Determine if blobs are present in the CAS.
  //
  // Clients can use this API before uploading blobs to determine which ones are
  // already present in the CAS and do not need to be uploaded again.
  //
  // Servers SHOULD increase the lifetimes of the referenced blobs if necessary and
  // applicable.
  //
  // There are no method-specific errors.
  rpc FindMissingBlobs(FindMissingBlobsRequest) returns (FindMissingBlobsResponse) {
    option (google.api.http) = { post: "/v2/{instance_name=**}/blobs:findMissing" body: "*" };
  }

  // Upload many blobs at once.
  //
  // The server may enforce a limit of the combined total size of blobs
  // to be uploaded using this API. This limit may be obtained using the
  // [Capabilities][build.bazel.remote.execution.v2.Capabilities] API.
  // Requests exceeding the limit should either be split into smaller
  // chunks or uploaded using the
  // [ByteStream API][google.bytestream.ByteStream], as appropriate.
  //
  // This request is equivalent to calling a ByteStream `Write` request
  // on each individual blob, in parallel. The requests may succeed or fail
  // independently.
  //
  // Errors:
  //
  // * `INVALID_ARGUMENT`: The client attempted to upload more than the
  //   server supported limit.
  //
  // Individual requests may return the following errors, additionally:
  //
  // * `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the blob.
  // * `INVALID_ARGUMENT`: The
  //   [Digest][build.bazel.remote.execution.v2.Digest] does not match the
  //   provided data.
  rpc BatchUpdateBlobs(BatchUpdateBlobsRequest) returns (BatchUpdateBlobsResponse) {
    option (google.api.http) = { post: "/v2/{instance_name=**}/blobs:batchUpdate" body: "*" };
  }

  // Download many blobs at once.
  //
  // The server may enforce a limit of the combined total size of blobs
  // to be downloaded using this API. This limit may be obtained using the
  // [Capabilities][build.bazel.remote.execution.v2.Capabilities] API.
  // Requests exceeding the limit should either be split into smaller
  // chunks or downloaded using the
  // [ByteStream API][google.bytestream.ByteStream], as appropriate.
  //
  // This request is equivalent to calling a ByteStream `Read` request
  // on each individual blob, in parallel. The requests may succeed or fail
  // independently.
  //
  // Errors:
  //
  // * `INVALID_ARGUMENT`: The client attempted to read more than the
  //   server supported limit.
  //
  // Every error on individual read will be returned in the corresponding digest
  // status.
  rpc BatchReadBlobs(BatchReadBlobsRequest) returns (BatchReadBlobsResponse) {
    option (google.api.http) = { post: "/v2/{instance_name=**}/blobs:batchRead" body: "*" };
  }

  // Fetch the entire directory tree rooted at a node.
  //
  // This request must be targeted at a
  // [Directory][build.bazel.remote.execution.v2.Directory] stored in the
  // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]
  // (CAS). The server will enumerate the `Directory` tree recursively and
  // return every node descended from the root.
  //
  // The GetTreeRequest.page_token parameter can be used to skip ahead in
  // the stream (e.g. when retrying a partially completed and aborted request),
  // by setting it to a value taken from GetTreeResponse.next_page_token of the
  // last successfully processed GetTreeResponse.
  //
  // The exact traversal order is unspecified and, unless retrieving subsequent
  // pages from an earlier request, is not guaranteed to be stable across
  // multiple invocations of `GetTree`.
  //
  // If part of the tree is missing from the CAS, the server will return the
  // portion present and omit the rest.
  //
  // Errors:
  //
  // * `NOT_FOUND`: The requested tree root is not present in the CAS.
  rpc GetTree(GetTreeRequest) returns (stream GetTreeResponse) {
    option (google.api.http) = { get: "/v2/{instance_name=**}/blobs/{root_digest.hash}/{root_digest.size_bytes}:getTree" };
  }
}
434
// The Capabilities service may be used by remote execution clients to query
// various server properties, in order to self-configure or return meaningful
// error messages.
//
// The query may include a particular `instance_name`, in which case the values
// returned will pertain to that instance.
service Capabilities {
  // GetCapabilities returns the server capabilities configuration of the
  // remote endpoint.
  // Only the capabilities of the services supported by the endpoint will
  // be returned:
  // * Execution + CAS + Action Cache endpoints should return both
  //   CacheCapabilities and ExecutionCapabilities.
  // * Execution only endpoints should return ExecutionCapabilities.
  // * CAS + Action Cache only endpoints should return CacheCapabilities.
  //
  // There are no method-specific errors.
  rpc GetCapabilities(GetCapabilitiesRequest) returns (ServerCapabilities) {
    // Single-line HTTP binding, consistent with every other RPC in this file.
    option (google.api.http) = { get: "/v2/{instance_name=**}/capabilities" };
  }
}
458
459// An `Action` captures all the information about an execution which is required
460// to reproduce it.
461//
462// `Action`s are the core component of the [Execution] service. A single
463// `Action` represents a repeatable action that can be performed by the
464// execution service. `Action`s can be succinctly identified by the digest of
465// their wire format encoding and, once an `Action` has been executed, will be
466// cached in the action cache. Future requests can then use the cached result
467// rather than needing to run afresh.
468//
469// When a server completes execution of an
470// [Action][build.bazel.remote.execution.v2.Action], it MAY choose to
471// cache the [result][build.bazel.remote.execution.v2.ActionResult] in
472// the [ActionCache][build.bazel.remote.execution.v2.ActionCache] unless
473// `do_not_cache` is `true`. Clients SHOULD expect the server to do so. By
474// default, future calls to
475// [Execute][build.bazel.remote.execution.v2.Execution.Execute] the same
476// `Action` will also serve their results from the cache. Clients must take care
477// to understand the caching behaviour. Ideally, all `Action`s will be
478// reproducible so that serving a result from cache is always desirable and
479// correct.
480message Action {
481  // The digest of the [Command][build.bazel.remote.execution.v2.Command]
482  // to run, which MUST be present in the
483  // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
484  Digest command_digest = 1;
485
486  // The digest of the root
487  // [Directory][build.bazel.remote.execution.v2.Directory] for the input
488  // files. The files in the directory tree are available in the correct
489  // location on the build machine before the command is executed. The root
490  // directory, as well as every subdirectory and content blob referred to, MUST
491  // be in the
492  // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
493  Digest input_root_digest = 2;
494
495  reserved 3 to 5; // Used for fields moved to [Command][build.bazel.remote.execution.v2.Command].
496
497  // A timeout after which the execution should be killed. If the timeout is
498  // absent, then the client is specifying that the execution should continue
499  // as long as the server will let it. The server SHOULD impose a timeout if
500  // the client does not specify one, however, if the client does specify a
501  // timeout that is longer than the server's maximum timeout, the server MUST
502  // reject the request.
503  //
504  // The timeout is only intended to cover the "execution" of the specified
505  // action and not time in queue nor any overheads before or after execution
506  // such as marshalling inputs/outputs. The server SHOULD avoid including time
507  // spent the client doesn't have control over, and MAY extend or reduce the
508  // timeout to account for delays or speedups that occur during execution
509  // itself (e.g., lazily loading data from the Content Addressable Storage,
510  // live migration of virtual machines, emulation overhead).
511  //
512  // The timeout is a part of the
513  // [Action][build.bazel.remote.execution.v2.Action] message, and
514  // therefore two `Actions` with different timeouts are different, even if they
515  // are otherwise identical. This is because, if they were not, running an
516  // `Action` with a lower timeout than is required might result in a cache hit
517  // from an execution run with a longer timeout, hiding the fact that the
518  // timeout is too short. By encoding it directly in the `Action`, a lower
519  // timeout will result in a cache miss and the execution timeout will fail
520  // immediately, rather than whenever the cache entry gets evicted.
521  google.protobuf.Duration timeout = 6;
522
523  // If true, then the `Action`'s result cannot be cached, and in-flight
524  // requests for the same `Action` may not be merged.
525  bool do_not_cache = 7;
526
527  reserved 8; // Used for field moved to [Command][build.bazel.remote.execution.v2.Command].
528
529  // An optional additional salt value used to place this `Action` into a
530  // separate cache namespace from other instances having the same field
531  // contents. This salt typically comes from operational configuration
532  // specific to sources such as repo and service configuration,
533  // and allows disowning an entire set of ActionResults that might have been
534  // poisoned by buggy software or tool failures.
535  bytes salt = 9;
536
537  // The optional platform requirements for the execution environment. The
538  // server MAY choose to execute the action on any worker satisfying the
539  // requirements, so the client SHOULD ensure that running the action on any
540  // such worker will have the same result.  A detailed lexicon for this can be
541  // found in the accompanying platform.md.
542  // New in version 2.2: clients SHOULD set these platform properties as well
543  // as those in the [Command][build.bazel.remote.execution.v2.Command]. Servers
544  // SHOULD prefer those set here.
545  Platform platform = 10;
546}
547
548// A `Command` is the actual command executed by a worker running an
549// [Action][build.bazel.remote.execution.v2.Action] and specifications of its
550// environment.
551//
552// Except as otherwise required, the environment (such as which system
553// libraries or binaries are available, and what filesystems are mounted where)
554// is defined by and specific to the implementation of the remote execution API.
message Command {
  // An `EnvironmentVariable` is one variable to set in the running program's
  // environment.
  message EnvironmentVariable {
    // The variable name.
    string name = 1;

    // The variable value.
    string value = 2;
  }

  // The arguments to the command.
  //
  // The first argument specifies the command to run, which may be either an
  // absolute path, a path relative to the working directory, or an unqualified
  // path (without path separators) which will be resolved using the operating
  // system's equivalent of the PATH environment variable. Path separators
  // native to the operating system running on the worker SHOULD be used. If the
  // `environment_variables` list contains an entry for the PATH environment
  // variable, it SHOULD be respected. If not, the resolution process is
  // implementation-defined.
  //
  // Changed in v2.3. v2.2 and older require that no PATH lookups are performed,
  // and that relative paths are resolved relative to the input root. This
  // behavior can, however, not be relied upon, as most implementations already
  // followed the rules described above.
  repeated string arguments = 1;

  // The environment variables to set when running the program. The worker may
  // provide its own default environment variables; these defaults can be
  // overridden using this field. Additional variables can also be specified.
  //
  // In order to ensure that equivalent
  // [Command][build.bazel.remote.execution.v2.Command]s always hash to the same
  // value, the environment variables MUST be lexicographically sorted by name.
  // Sorting of strings is done by code point, equivalently, by the UTF-8 bytes.
  repeated EnvironmentVariable environment_variables = 2;

  // A list of the output files that the client expects to retrieve from the
  // action. Only the listed files, as well as directories listed in
  // `output_directories`, will be returned to the client as output.
  // Other files or directories that may be created during command execution
  // are discarded.
  //
  // The paths are relative to the working directory of the action execution.
  // The paths are specified using a single forward slash (`/`) as a path
  // separator, even if the execution platform natively uses a different
  // separator. The path MUST NOT include a trailing slash, nor a leading slash,
  // being a relative path.
  //
  // In order to ensure consistent hashing of the same Action, the output paths
  // MUST be sorted lexicographically by code point (or, equivalently, by UTF-8
  // bytes).
  //
  // An output file cannot be duplicated, be a parent of another output file, or
  // have the same path as any of the listed output directories.
  //
  // Directories leading up to the output files are created by the worker prior
  // to execution, even if they are not explicitly part of the input root.
  //
  // DEPRECATED since v2.1: Use `output_paths` instead.
  repeated string output_files = 3 [ deprecated = true ];

  // A list of the output directories that the client expects to retrieve from
  // the action. Only the listed directories will be returned (an entire
  // directory structure will be returned as a
  // [Tree][build.bazel.remote.execution.v2.Tree] message digest, see
  // [OutputDirectory][build.bazel.remote.execution.v2.OutputDirectory]), as
  // well as files listed in `output_files`. Other files or directories that
  // may be created during command execution are discarded.
  //
  // The paths are relative to the working directory of the action execution.
  // The paths are specified using a single forward slash (`/`) as a path
  // separator, even if the execution platform natively uses a different
  // separator. The path MUST NOT include a trailing slash, nor a leading slash,
  // being a relative path. The special value of empty string is allowed,
  // although not recommended, and can be used to capture the entire working
  // directory tree, including inputs.
  //
  // In order to ensure consistent hashing of the same Action, the output paths
  // MUST be sorted lexicographically by code point (or, equivalently, by UTF-8
  // bytes).
  //
  // An output directory cannot be duplicated or have the same path as any of
  // the listed output files. An output directory is allowed to be a parent of
  // another output directory.
  //
  // Directories leading up to the output directories (but not the output
  // directories themselves) are created by the worker prior to execution, even
  // if they are not explicitly part of the input root.
  //
  // DEPRECATED since v2.1: Use `output_paths` instead.
  repeated string output_directories = 4 [ deprecated = true ];

  // A list of the output paths that the client expects to retrieve from the
  // action. Only the listed paths will be returned to the client as output.
  // The type of the output (file or directory) is not specified, and will be
  // determined by the server after action execution. If the resulting path is
  // a file, it will be returned in an
  // [OutputFile][build.bazel.remote.execution.v2.OutputFile] typed field.
  // If the path is a directory, the entire directory structure will be returned
  // as a [Tree][build.bazel.remote.execution.v2.Tree] message digest, see
  // [OutputDirectory][build.bazel.remote.execution.v2.OutputDirectory].
  // Other files or directories that may be created during command execution
  // are discarded.
  //
  // The paths are relative to the working directory of the action execution.
  // The paths are specified using a single forward slash (`/`) as a path
  // separator, even if the execution platform natively uses a different
  // separator. The path MUST NOT include a trailing slash, nor a leading slash,
  // being a relative path.
  //
  // In order to ensure consistent hashing of the same Action, the output paths
  // MUST be deduplicated and sorted lexicographically by code point (or,
  // equivalently, by UTF-8 bytes).
  //
  // Directories leading up to the output paths are created by the worker prior
  // to execution, even if they are not explicitly part of the input root.
  //
  // New in v2.1: this field supersedes the DEPRECATED `output_files` and
  // `output_directories` fields. If `output_paths` is used, `output_files` and
  // `output_directories` will be ignored!
  repeated string output_paths = 7;

  // The platform requirements for the execution environment. The server MAY
  // choose to execute the action on any worker satisfying the requirements, so
  // the client SHOULD ensure that running the action on any such worker will
  // have the same result. A detailed lexicon for this can be found in the
  // accompanying platform.md.
  // DEPRECATED as of v2.2: platform properties are now specified directly in
  // the action. See documentation note in the
  // [Action][build.bazel.remote.execution.v2.Action] for migration.
  Platform platform = 5 [ deprecated = true ];

  // The working directory, relative to the input root, for the command to run
  // in. It must be a directory which exists in the input tree. If it is left
  // empty, then the action is run in the input root.
  string working_directory = 6;

  // A list of keys for node properties the client expects to retrieve for
  // output files and directories. Keys are either names of string-based
  // [NodeProperty][build.bazel.remote.execution.v2.NodeProperty] or
  // names of fields in [NodeProperties][build.bazel.remote.execution.v2.NodeProperties].
  // In order to ensure that equivalent `Action`s always hash to the same
  // value, the node properties MUST be lexicographically sorted by name.
  // Sorting of strings is done by code point, equivalently, by the UTF-8 bytes.
  //
  // The interpretation of string-based properties is server-dependent. If a
  // property is not recognized by the server, the server will return an
  // `INVALID_ARGUMENT`.
  repeated string output_node_properties = 8;

  enum OutputDirectoryFormat {
    // The client is only interested in receiving output directories in
    // the form of a single Tree object, using the `tree_digest` field.
    TREE_ONLY = 0;

    // The client is only interested in receiving output directories in
    // the form of a hierarchy of separately stored Directory objects,
    // using the `root_directory_digest` field.
    DIRECTORY_ONLY = 1;

    // The client is interested in receiving output directories both in
    // the form of a single Tree object and a hierarchy of separately
    // stored Directory objects, using both the `tree_digest` and
    // `root_directory_digest` fields.
    TREE_AND_DIRECTORY = 2;
  }

  // The format that the worker should use to store the contents of
  // output directories.
  //
  // In case this field is set to a value that is not supported by the
  // worker, the worker SHOULD interpret this field as TREE_ONLY. The
  // worker MAY store output directories in formats that are a superset
  // of what was requested (e.g., interpreting DIRECTORY_ONLY as
  // TREE_AND_DIRECTORY).
  OutputDirectoryFormat output_directory_format = 9;
}
734
735// A `Platform` is a set of requirements, such as hardware, operating system, or
736// compiler toolchain, for an
737// [Action][build.bazel.remote.execution.v2.Action]'s execution
738// environment. A `Platform` is represented as a series of key-value pairs
739// representing the properties that are required of the platform.
message Platform {
  // A single property for the environment. The server is responsible for
  // specifying the property `name`s that it accepts. If an unknown `name` is
  // provided in the requirements for an
  // [Action][build.bazel.remote.execution.v2.Action], the server SHOULD
  // reject the execution request. If permitted by the server, the same `name`
  // may occur multiple times.
  //
  // The server is also responsible for specifying the interpretation of
  // property `value`s. For instance, a property describing how much RAM must be
  // available may be interpreted as allowing a worker with 16GB to fulfill a
  // request for 8GB, while a property describing the OS environment on which
  // the action must be performed may require an exact match with the worker's
  // OS.
  //
  // The server MAY use the `value` of one or more properties to determine how
  // it sets up the execution environment, such as by making specific system
  // files available to the worker.
  //
  // Both names and values are typically case-sensitive. Note that the platform
  // is implicitly part of the action digest, so even tiny changes in the names
  // or values (like changing case) may result in different action cache
  // entries.
  message Property {
    // The property name.
    string name = 1;

    // The property value.
    string value = 2;
  }

  // The properties that make up this platform. In order to ensure that
  // equivalent `Platform`s always hash to the same value, the properties MUST
  // be lexicographically sorted by name, and then by value. Sorting of strings
  // is done by code point, equivalently, by the UTF-8 bytes.
  //
  // A detailed lexicon of standard property names can be found in the
  // accompanying platform.md.
  repeated Property properties = 1;
}
777
778// A `Directory` represents a directory node in a file tree, containing zero or
779// more children [FileNodes][build.bazel.remote.execution.v2.FileNode],
780// [DirectoryNodes][build.bazel.remote.execution.v2.DirectoryNode] and
781// [SymlinkNodes][build.bazel.remote.execution.v2.SymlinkNode].
782// Each `Node` contains its name in the directory, either the digest of its
783// content (either a file blob or a `Directory` proto) or a symlink target, as
784// well as possibly some metadata about the file or directory.
785//
786// In order to ensure that two equivalent directory trees hash to the same
// value, the following restrictions MUST be obeyed when constructing
// a `Directory`:
789//
790// * Every child in the directory must have a path of exactly one segment.
791//   Multiple levels of directory hierarchy may not be collapsed.
792// * Each child in the directory must have a unique path segment (file name).
793//   Note that while the API itself is case-sensitive, the environment where
794//   the Action is executed may or may not be case-sensitive. That is, it is
795//   legal to call the API with a Directory that has both "Foo" and "foo" as
796//   children, but the Action may be rejected by the remote system upon
797//   execution.
798// * The files, directories and symlinks in the directory must each be sorted
799//   in lexicographical order by path. The path strings must be sorted by code
800//   point, equivalently, by UTF-8 bytes.
801// * The [NodeProperties][build.bazel.remote.execution.v2.NodeProperty] of files,
802//   directories, and symlinks must be sorted in lexicographical order by
803//   property name.
804//
805// A `Directory` that obeys the restrictions is said to be in canonical form.
806//
807// As an example, the following could be used for a file named `bar` and a
808// directory named `foo` with an executable file named `baz` (hashes shortened
809// for readability):
810//
811// ```json
812// // (Directory proto)
813// {
814//   files: [
815//     {
816//       name: "bar",
817//       digest: {
818//         hash: "4a73bc9d03...",
819//         size: 65534
820//       },
821//       node_properties: [
822//         {
823//           "name": "MTime",
824//           "value": "2017-01-15T01:30:15.01Z"
825//         }
826//       ]
827//     }
828//   ],
829//   directories: [
830//     {
831//       name: "foo",
832//       digest: {
833//         hash: "4cf2eda940...",
834//         size: 43
835//       }
836//     }
837//   ]
838// }
839//
840// // (Directory proto with hash "4cf2eda940..." and size 43)
841// {
842//   files: [
843//     {
844//       name: "baz",
845//       digest: {
846//         hash: "b2c941073e...",
847//         size: 1294,
848//       },
849//       is_executable: true
850//     }
851//   ]
852// }
853// ```
message Directory {
  // The files in the directory.
  repeated FileNode files = 1;

  // The subdirectories in the directory.
  repeated DirectoryNode directories = 2;

  // The symlinks in the directory.
  repeated SymlinkNode symlinks = 3;

  reserved 4;

  // The node properties of the Directory.
  NodeProperties node_properties = 5;
}
868
869// A single property for [FileNodes][build.bazel.remote.execution.v2.FileNode],
870// [DirectoryNodes][build.bazel.remote.execution.v2.DirectoryNode], and
871// [SymlinkNodes][build.bazel.remote.execution.v2.SymlinkNode]. The server is
872// responsible for specifying the property `name`s that it accepts. If
873// permitted by the server, the same `name` may occur multiple times.
message NodeProperty {
  // The property name.
  string name = 1;

  // The property value.
  string value = 2;
}
881
882// Node properties for [FileNodes][build.bazel.remote.execution.v2.FileNode],
883// [DirectoryNodes][build.bazel.remote.execution.v2.DirectoryNode], and
884// [SymlinkNodes][build.bazel.remote.execution.v2.SymlinkNode]. The server is
// responsible for specifying the properties that it accepts.
message NodeProperties {
  // A list of string-based
  // [NodeProperty][build.bazel.remote.execution.v2.NodeProperty] messages.
  repeated NodeProperty properties = 1;

  // The file's last modification timestamp.
  google.protobuf.Timestamp mtime = 2;

  // The UNIX file mode, e.g., 0755.
  google.protobuf.UInt32Value unix_mode = 3;
}
898
899// A `FileNode` represents a single file and associated metadata.
message FileNode {
  // The name of the file.
  string name = 1;

  // The digest of the file's content.
  Digest digest = 2;

  reserved 3; // Reserved to ensure wire-compatibility with `OutputFile`.

  // True if file is executable, false otherwise.
  bool is_executable = 4;

  reserved 5;

  // The node properties of the FileNode.
  NodeProperties node_properties = 6;
}
916
917// A `DirectoryNode` represents a child of a
918// [Directory][build.bazel.remote.execution.v2.Directory] which is itself
919// a `Directory` and its associated metadata.
message DirectoryNode {
  // The name of the directory. As with all children of a
  // [Directory][build.bazel.remote.execution.v2.Directory], this must be a
  // single path segment (see the canonical-form rules on `Directory`).
  string name = 1;

  // The digest of the
  // [Directory][build.bazel.remote.execution.v2.Directory] object
  // represented. See [Digest][build.bazel.remote.execution.v2.Digest]
  // for information about how to take the digest of a proto message.
  Digest digest = 2;
}
930
931// A `SymlinkNode` represents a symbolic link.
message SymlinkNode {
  // The name of the symlink.
  string name = 1;

  // The target path of the symlink. The path separator is a forward slash `/`.
  // The target path can be relative to the parent directory of the symlink or
  // it can be an absolute path starting with `/`. Support for absolute paths
  // can be checked using the [Capabilities][build.bazel.remote.execution.v2.Capabilities]
  // API. `..` components are allowed anywhere in the target path as logical
  // canonicalization may lead to different behavior in the presence of
  // directory symlinks (e.g. `foo/../bar` may not be the same as `bar`).
  // To reduce potential cache misses, canonicalization is still recommended
  // where this is possible without impacting correctness.
  string target = 2;

  reserved 3;

  // The node properties of the SymlinkNode.
  NodeProperties node_properties = 4;
}
951
952// A content digest. A digest for a given blob consists of the size of the blob
953// and its hash. The hash algorithm to use is defined by the server.
954//
955// The size is considered to be an integral part of the digest and cannot be
956// separated. That is, even if the `hash` field is correctly specified but
957// `size_bytes` is not, the server MUST reject the request.
958//
959// The reason for including the size in the digest is as follows: in a great
960// many cases, the server needs to know the size of the blob it is about to work
961// with prior to starting an operation with it, such as flattening Merkle tree
962// structures or streaming it to a worker. Technically, the server could
963// implement a separate metadata store, but this results in a significantly more
964// complicated implementation as opposed to having the client specify the size
965// up-front (or storing the size along with the digest in every message where
966// digests are embedded). This does mean that the API leaks some implementation
967// details of (what we consider to be) a reasonable server implementation, but
968// we consider this to be a worthwhile tradeoff.
969//
970// When a `Digest` is used to refer to a proto message, it always refers to the
971// message in binary encoded form. To ensure consistent hashing, clients and
972// servers MUST ensure that they serialize messages according to the following
973// rules, even if there are alternate valid encodings for the same message:
974//
975// * Fields are serialized in tag order.
976// * There are no unknown fields.
977// * There are no duplicate fields.
978// * Fields are serialized according to the default semantics for their type.
979//
980// Most protocol buffer implementations will always follow these rules when
981// serializing, but care should be taken to avoid shortcuts. For instance,
982// concatenating two messages to merge them may produce duplicate fields.
message Digest {
  // The hash, represented as a lowercase hexadecimal string, padded with
  // leading zeroes up to the hash function length.
  string hash = 1;

  // The size of the blob, in bytes. The size is an integral part of the
  // digest: a request in which `hash` is correctly specified but `size_bytes`
  // is not MUST be rejected by the server (see the message-level comment).
  int64 size_bytes = 2;
}
991
992// ExecutedActionMetadata contains details about a completed execution.
message ExecutedActionMetadata {
  // The name of the worker which ran the execution.
  string worker = 1;

  // When was the action added to the queue.
  google.protobuf.Timestamp queued_timestamp = 2;

  // When the worker received the action.
  google.protobuf.Timestamp worker_start_timestamp = 3;

  // When the worker completed the action, including all stages.
  google.protobuf.Timestamp worker_completed_timestamp = 4;

  // When the worker started fetching action inputs.
  google.protobuf.Timestamp input_fetch_start_timestamp = 5;

  // When the worker finished fetching action inputs.
  google.protobuf.Timestamp input_fetch_completed_timestamp = 6;

  // When the worker started executing the action command.
  google.protobuf.Timestamp execution_start_timestamp = 7;

  // When the worker completed executing the action command.
  google.protobuf.Timestamp execution_completed_timestamp = 8;

  // New in v2.3: the amount of time the worker spent executing the action
  // command, potentially computed using a worker-specific virtual clock.
  //
  // The virtual execution duration is only intended to cover the "execution" of
  // the specified action and not time in queue nor any overheads before or
  // after execution such as marshalling inputs/outputs. The server SHOULD avoid
  // including time spent the client doesn't have control over, and MAY extend
  // or reduce the execution duration to account for delays or speedups that
  // occur during execution itself (e.g., lazily loading data from the Content
  // Addressable Storage, live migration of virtual machines, emulation
  // overhead).
  //
  // The method of timekeeping used to compute the virtual execution duration
  // MUST be consistent with what is used to enforce the
  // [Action][build.bazel.remote.execution.v2.Action]'s `timeout`. There is no
  // relationship between the virtual execution duration and the values of
  // `execution_start_timestamp` and `execution_completed_timestamp`.
  google.protobuf.Duration virtual_execution_duration = 12;

  // When the worker started uploading action outputs.
  google.protobuf.Timestamp output_upload_start_timestamp = 9;

  // When the worker finished uploading action outputs.
  google.protobuf.Timestamp output_upload_completed_timestamp = 10;

  // Details that are specific to the kind of worker used. For example,
  // on POSIX-like systems this could contain a message with
  // getrusage(2) statistics.
  repeated google.protobuf.Any auxiliary_metadata = 11;
}
1048
1049// An ActionResult represents the result of an
1050// [Action][build.bazel.remote.execution.v2.Action] being run.
1051//
1052// It is advised that at least one field (for example
1053// `ActionResult.execution_metadata.Worker`) have a non-default value, to
1054// ensure that the serialized value is non-empty, which can then be used
1055// as a basic data sanity check.
message ActionResult {
  reserved 1; // Reserved for use as the resource name.

  // The output files of the action. For each output file requested in the
  // `output_files` or `output_paths` field of the Action, if the corresponding
  // file existed after the action completed, a single entry will be present
  // either in this field, or the `output_file_symlinks` field if the file was
  // a symbolic link to another file (`output_symlinks` field after v2.1).
  //
  // If an output listed in `output_files` was found, but was a directory rather
  // than a regular file, the server will return a FAILED_PRECONDITION.
  // If the action does not produce the requested output, then that output
  // will be omitted from the list. The server is free to arrange the output
  // list as desired; clients MUST NOT assume that the output list is sorted.
  repeated OutputFile output_files = 2;

  // The output files of the action that are symbolic links to other files. Those
  // may be links to other output files, or input files, or even absolute paths
  // outside of the working directory, if the server supports
  // [SymlinkAbsolutePathStrategy.ALLOWED][build.bazel.remote.execution.v2.CacheCapabilities.SymlinkAbsolutePathStrategy].
  // For each output file requested in the `output_files` or `output_paths`
  // field of the Action, if the corresponding file existed after
  // the action completed, a single entry will be present either in this field,
  // or in the `output_files` field, if the file was not a symbolic link.
  //
  // If an output symbolic link of the same name as listed in `output_files` of
  // the Command was found, but its target type was not a regular file, the
  // server will return a FAILED_PRECONDITION.
  // If the action does not produce the requested output, then that output
  // will be omitted from the list. The server is free to arrange the output
  // list as desired; clients MUST NOT assume that the output list is sorted.
  //
  // DEPRECATED as of v2.1. Servers that wish to be compatible with v2.0 API
  // should still populate this field in addition to `output_symlinks`.
  repeated OutputSymlink output_file_symlinks = 10 [ deprecated = true ];

  // New in v2.1: this field will only be populated if the command
  // `output_paths` field was used, and not the pre v2.1 `output_files` or
  // `output_directories` fields.
  // The output paths of the action that are symbolic links to other paths. Those
  // may be links to other outputs, or inputs, or even absolute paths
  // outside of the working directory, if the server supports
  // [SymlinkAbsolutePathStrategy.ALLOWED][build.bazel.remote.execution.v2.CacheCapabilities.SymlinkAbsolutePathStrategy].
  // A single entry for each output requested in `output_paths`
  // field of the Action, if the corresponding path existed after
  // the action completed and was a symbolic link.
  //
  // If the action does not produce a requested output, then that output
  // will be omitted from the list. The server is free to arrange the output
  // list as desired; clients MUST NOT assume that the output list is sorted.
  repeated OutputSymlink output_symlinks = 12;

  // The output directories of the action. For each output directory requested
  // in the `output_directories` or `output_paths` field of the Action, if the
  // corresponding directory existed after the action completed, a single entry
  // will be present in the output list, which will contain the digest of a
  // [Tree][build.bazel.remote.execution.v2.Tree] message containing the
  // directory tree, and a path equal exactly to the corresponding Action
  // output_directories member.
  //
  // As an example, suppose the Action had an output directory `a/b/dir` and the
  // execution produced the following contents in `a/b/dir`: a file named `bar`
  // and a directory named `foo` with an executable file named `baz`. Then,
  // output_directory will contain (hashes shortened for readability):
  //
  // ```json
  // // OutputDirectory proto:
  // {
  //   path: "a/b/dir"
  //   tree_digest: {
  //     hash: "4a73bc9d03...",
  //     size: 55
  //   }
  // }
  // // Tree proto with hash "4a73bc9d03..." and size 55:
  // {
  //   root: {
  //     files: [
  //       {
  //         name: "bar",
  //         digest: {
  //           hash: "4a73bc9d03...",
  //           size: 65534
  //         }
  //       }
  //     ],
  //     directories: [
  //       {
  //         name: "foo",
  //         digest: {
  //           hash: "4cf2eda940...",
  //           size: 43
  //         }
  //       }
  //     ]
  //   }
  //   children: {
  //     // (Directory proto with hash "4cf2eda940..." and size 43)
  //     files: [
  //       {
  //         name: "baz",
  //         digest: {
  //           hash: "b2c941073e...",
  //           size: 1294,
  //         },
  //         is_executable: true
  //       }
  //     ]
  //   }
  // }
  // ```
  // If an output of the same name as listed in `output_files` of
  // the Command was found in `output_directories`, but was not a directory, the
  // server will return a FAILED_PRECONDITION.
  repeated OutputDirectory output_directories = 3;

  // The output directories of the action that are symbolic links to other
  // directories. Those may be links to other output directories, or input
  // directories, or even absolute paths outside of the working directory,
  // if the server supports
  // [SymlinkAbsolutePathStrategy.ALLOWED][build.bazel.remote.execution.v2.CacheCapabilities.SymlinkAbsolutePathStrategy].
  // For each output directory requested in the `output_directories` field of
  // the Action, if the directory existed after the action completed, a
  // single entry will be present either in this field, or in the
  // `output_directories` field, if the directory was not a symbolic link.
  //
  // If an output of the same name was found, but was a symbolic link to a file
  // instead of a directory, the server will return a FAILED_PRECONDITION.
  // If the action does not produce the requested output, then that output
  // will be omitted from the list. The server is free to arrange the output
  // list as desired; clients MUST NOT assume that the output list is sorted.
  //
  // DEPRECATED as of v2.1. Servers that wish to be compatible with v2.0 API
  // should still populate this field in addition to `output_symlinks`.
  repeated OutputSymlink output_directory_symlinks = 11 [ deprecated = true ];

  // The exit code of the command.
  int32 exit_code = 4;

  // The standard output buffer of the action. The server SHOULD NOT inline
  // stdout unless requested by the client in the
  // [GetActionResultRequest][build.bazel.remote.execution.v2.GetActionResultRequest]
  // message. The server MAY omit inlining, even if requested, and MUST do so if inlining
  // would cause the response to exceed message size limits.
  // Clients SHOULD NOT populate this field when uploading to the cache.
  bytes stdout_raw = 5;

  // The digest for a blob containing the standard output of the action, which
  // can be retrieved from the
  // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
  Digest stdout_digest = 6;

  // The standard error buffer of the action. The server SHOULD NOT inline
  // stderr unless requested by the client in the
  // [GetActionResultRequest][build.bazel.remote.execution.v2.GetActionResultRequest]
  // message. The server MAY omit inlining, even if requested, and MUST do so if inlining
  // would cause the response to exceed message size limits.
  // Clients SHOULD NOT populate this field when uploading to the cache.
  bytes stderr_raw = 7;

  // The digest for a blob containing the standard error of the action, which
  // can be retrieved from the
  // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
  Digest stderr_digest = 8;

  // The details of the execution that originally produced this result.
  ExecutedActionMetadata execution_metadata = 9;
}
1224
1225// An `OutputFile` is similar to a
1226// [FileNode][build.bazel.remote.execution.v2.FileNode], but it is used as an
1227// output in an `ActionResult`. It allows a full file path rather than
1228// only a name.
message OutputFile {
  // The full path of the file relative to the working directory, including the
  // filename. The path separator is a forward slash `/`. Since this is a
  // relative path, it MUST NOT begin with a leading forward slash.
  string path = 1;

  // The digest of the file's content.
  Digest digest = 2;

  reserved 3; // Used for a removed field in an earlier version of the API.

  // True if file is executable, false otherwise.
  bool is_executable = 4;

  // The contents of the file if inlining was requested. The server SHOULD NOT inline
  // file contents unless requested by the client in the
  // [GetActionResultRequest][build.bazel.remote.execution.v2.GetActionResultRequest]
  // message. The server MAY omit inlining, even if requested, and MUST do so if inlining
  // would cause the response to exceed message size limits.
  // Clients SHOULD NOT populate this field when uploading to the cache.
  bytes contents = 5;

  reserved 6;

  // The supported node properties of the OutputFile, if requested by the Action.
  NodeProperties node_properties = 7;
}
1255
// A `Tree` contains all the
// [Directory][build.bazel.remote.execution.v2.Directory] protos in a
// single directory Merkle tree, compressed into one message.
// A `Tree` is typically referred to by the digest of its encoded form; see
// [OutputDirectory][build.bazel.remote.execution.v2.OutputDirectory]'s
// `tree_digest`.
message Tree {
  // The root directory in the tree.
  Directory root = 1;

  // All the child directories: the directories referred to by the root and,
  // recursively, all its children. In order to reconstruct the directory tree,
  // the client must take the digests of each of the child directories and then
  // build up a tree starting from the `root`.
  // Servers SHOULD ensure that these are ordered consistently such that two
  // actions producing equivalent output directories on the same server
  // implementation also produce Tree messages with matching digests.
  repeated Directory children = 2;
}
1272
// An `OutputDirectory` is the output in an `ActionResult` corresponding to a
// directory's full contents rather than a single file.
message OutputDirectory {
  // The full path of the directory relative to the working directory. The path
  // separator is a forward slash `/`. Since this is a relative path, it MUST
  // NOT begin with a leading forward slash. The empty string value is allowed,
  // and it denotes the entire working directory.
  string path = 1;

  reserved 2; // Used for a removed field in an earlier version of the API.

  // The digest of the encoded
  // [Tree][build.bazel.remote.execution.v2.Tree] proto containing the
  // directory's contents.
  Digest tree_digest = 3;

  // If set, consumers MAY make the following assumptions about the
  // directories contained in the Tree, so that it may be
  // instantiated on a local file system by scanning through it
  // sequentially:
  //
  // - All directories with the same binary representation are stored
  //   exactly once.
  // - All directories, apart from the root directory, are referenced by
  //   at least one parent directory.
  // - Directories are stored in topological order, with parents being
  //   stored before the child. The root directory is thus the first to
  //   be stored.
  //
  // Additionally, the Tree MUST be encoded as a stream of records,
  // where each record has the following format:
  //
  // - A tag byte, having one of the following two values:
  //   - (1 << 3) | 2 == 0x0a: First record (the root directory).
  //   - (2 << 3) | 2 == 0x12: Any subsequent records (child directories).
  // - The size of the directory, encoded as a base 128 varint.
  // - The contents of the directory, encoded as a binary serialized
  //   Protobuf message.
  //
  // This encoding is a subset of the Protobuf wire format of the Tree
  // message. As it is only permitted to store data associated with
  // field numbers 1 and 2, the tag MUST be encoded as a single byte.
  // More details on the Protobuf wire format can be found here:
  // https://developers.google.com/protocol-buffers/docs/encoding
  //
  // It is recommended that implementations using this feature construct
  // Tree objects manually using the specification given above, as
  // opposed to using a Protobuf library to marshal a full Tree message.
  // As individual Directory messages already need to be marshaled to
  // compute their digests, constructing the Tree object manually avoids
  // redundant marshaling.
  bool is_topologically_sorted = 4;

  // The digest of the encoded
  // [Directory][build.bazel.remote.execution.v2.Directory] proto
  // containing the contents of the directory's root.
  //
  // If both `tree_digest` and `root_directory_digest` are set, this
  // field MUST match the digest of the root directory contained in the
  // Tree message.
  Digest root_directory_digest = 5;
}
1335
// An `OutputSymlink` is similar to a
// [Symlink][build.bazel.remote.execution.v2.SymlinkNode], but it is used as an
// output in an `ActionResult`.
//
// `OutputSymlink` is binary-compatible with `SymlinkNode`.
message OutputSymlink {
  // The full path of the symlink relative to the working directory, including the
  // filename. The path separator is a forward slash `/`. Since this is a
  // relative path, it MUST NOT begin with a leading forward slash.
  string path = 1;

  // The target path of the symlink. The path separator is a forward slash `/`.
  // The target path can be relative to the parent directory of the symlink or
  // it can be an absolute path starting with `/`. Support for absolute paths
  // can be checked using the [Capabilities][build.bazel.remote.execution.v2.Capabilities]
  // API. `..` components are allowed anywhere in the target path.
  string target = 2;

  reserved 3; // Used for a removed field in an earlier version of the API.

  // The supported node properties of the OutputSymlink, if requested by the
  // Action.
  NodeProperties node_properties = 4;
}
1359
// An `ExecutionPolicy` can be used to control the scheduling of the action.
message ExecutionPolicy {
  // The priority (relative importance) of this action. Generally, a lower value
  // means that the action should be run sooner than actions having a greater
  // priority value, but the interpretation of a given value is server-
  // dependent. A priority of 0 means the *default* priority. Priorities may be
  // positive or negative, and such actions should run later or sooner than
  // actions having the default priority, respectively. The particular semantics
  // of this field are up to the server. In particular, every server will have
  // its own supported range of priorities, and will decide how these map into
  // scheduling policy.
  int32 priority = 1;
}
1373
// A `ResultsCachePolicy` is used for fine-grained control over how action
// outputs are stored in the CAS and Action Cache.
message ResultsCachePolicy {
  // The priority (relative importance) of this content in the overall cache.
  // Generally, a lower value means a longer retention time or other advantage,
  // but the interpretation of a given value is server-dependent. A priority of
  // 0 means a *default* value, decided by the server.
  //
  // The particular semantics of this field are up to the server. In particular,
  // every server will have its own supported range of priorities, and will
  // decide how these map into retention/eviction policy.
  int32 priority = 1;
}
1387
// A request message for
// [Execution.Execute][build.bazel.remote.execution.v2.Execution.Execute].
message ExecuteRequest {
  // The instance of the execution system to operate against. A server may
  // support multiple instances of the execution system (with their own workers,
  // storage, caches, etc.). The server MAY require use of this field to select
  // between them in an implementation-defined fashion, otherwise it can be
  // omitted.
  string instance_name = 1;

  // If true, the action will be executed even if its result is already
  // present in the [ActionCache][build.bazel.remote.execution.v2.ActionCache].
  // The execution is still allowed to be merged with other in-flight executions
  // of the same action, however - semantically, the service MUST only guarantee
  // that the results of an execution with this field set were not visible
  // before the corresponding execution request was sent.
  // Note that actions from execution requests setting this field are still
  // eligible to be entered into the action cache upon completion, and services
  // SHOULD overwrite any existing entries that may exist. This allows
  // skip_cache_lookup requests to be used as a mechanism for replacing action
  // cache entries that reference outputs no longer available or that are
  // poisoned in any way.
  // If false, the result may be served from the action cache.
  bool skip_cache_lookup = 3;

  reserved 2, 4, 5; // Used for removed fields in an earlier version of the API.

  // The digest of the [Action][build.bazel.remote.execution.v2.Action] to
  // execute.
  Digest action_digest = 6;

  // An optional policy for execution of the action.
  // The server will have a default policy if this is not provided.
  ExecutionPolicy execution_policy = 7;

  // An optional policy for the results of this execution in the remote cache.
  // The server will have a default policy if this is not provided.
  // This may be applied to both the ActionResult and the associated blobs.
  ResultsCachePolicy results_cache_policy = 8;

  // The digest function that was used to compute the action digest.
  //
  // If the digest function used is one of MD5, MURMUR3, SHA1, SHA256,
  // SHA384, SHA512, or VSO, the client MAY leave this field unset. In
  // that case the server SHOULD infer the digest function using the
  // length of the action digest hash and the digest functions announced
  // in the server's capabilities.
  DigestFunction.Value digest_function = 9;

  // A hint to the server to request inlining stdout in the
  // [ActionResult][build.bazel.remote.execution.v2.ActionResult] message.
  bool inline_stdout = 10;

  // A hint to the server to request inlining stderr in the
  // [ActionResult][build.bazel.remote.execution.v2.ActionResult] message.
  bool inline_stderr = 11;

  // A hint to the server to inline the contents of the listed output files.
  // Each path needs to exactly match one file path in either `output_paths` or
  // `output_files` (DEPRECATED since v2.1) in the
  // [Command][build.bazel.remote.execution.v2.Command] message.
  repeated string inline_output_files = 12;
}
1451
// A `LogFile` is a log stored in the CAS.
message LogFile {
  // The digest of the log contents, which can be retrieved from the
  // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
  Digest digest = 1;

  // This is a hint as to the purpose of the log, and is set to true if the log
  // is human-readable text that can be usefully displayed to a user, and false
  // otherwise. For instance, if a command-line client wishes to print the
  // server logs to the terminal for a failed action, this allows it to avoid
  // displaying a binary file.
  bool human_readable = 2;
}
1464
// The response message for
// [Execution.Execute][build.bazel.remote.execution.v2.Execution.Execute],
// which will be contained in the [response
// field][google.longrunning.Operation.response] of the
// [Operation][google.longrunning.Operation].
message ExecuteResponse {
  // The result of the action.
  ActionResult result = 1;

  // True if the result was served from cache, false if it was executed.
  bool cached_result = 2;

  // If the status has a code other than `OK`, it indicates that the action did
  // not finish execution. For example, if the operation times out during
  // execution, the status will have a `DEADLINE_EXCEEDED` code. Servers MUST
  // use this field for errors in execution, rather than the error field on the
  // `Operation` object. Clients SHOULD therefore consult this field, rather
  // than the `Operation` error, to determine whether the action succeeded.
  //
  // If the status code is other than `OK`, then the result MUST NOT be cached.
  // For an error status, the `result` field is optional; the server may
  // populate the output-, stdout-, and stderr-related fields if it has any
  // information available, such as the stdout and stderr of a timed-out action.
  google.rpc.Status status = 3;

  // An optional list of additional log outputs the server wishes to provide. A
  // server can use this to return execution-specific logs however it wishes.
  // This is intended primarily to make it easier for users to debug issues that
  // may be outside of the actual job execution, such as by identifying the
  // worker executing the action or by providing logs from the worker's setup
  // phase. The keys SHOULD be human readable so that a client can display them
  // to a user.
  map<string, LogFile> server_logs = 4;

  // Freeform informational message with details on the execution of the action
  // that may be displayed to the user upon failure or when requested explicitly.
  string message = 5;
}
1502
// The current stage of action execution.
//
// Even though these stages are numbered according to the order in which
// they generally occur, there is no requirement that the remote
// execution system reports events along this order. For example, an
// operation MAY transition from the EXECUTING stage back to QUEUED
// in case the hardware on which the operation executes fails.
//
// If and only if the remote execution system reports that an operation
// has reached the COMPLETED stage, it MUST set the [done
// field][google.longrunning.Operation.done] of the
// [Operation][google.longrunning.Operation] and terminate the stream.
message ExecutionStage {
  enum Value {
    // Invalid value.
    UNKNOWN = 0;

    // Checking the result against the cache.
    CACHE_CHECK = 1;

    // Currently idle, awaiting a free machine to execute.
    QUEUED = 2;

    // Currently being executed by a worker.
    EXECUTING = 3;

    // Finished execution. When this stage is reported, the [done
    // field][google.longrunning.Operation.done] MUST be set (see above).
    COMPLETED = 4;
  }
}
1533
// Metadata about an ongoing
// [execution][build.bazel.remote.execution.v2.Execution.Execute], which
// will be contained in the [metadata
// field][google.longrunning.Operation.metadata] of the
// [Operation][google.longrunning.Operation].
message ExecuteOperationMetadata {
  // The current stage of execution.
  ExecutionStage.Value stage = 1;

  // The digest of the [Action][build.bazel.remote.execution.v2.Action]
  // being executed.
  Digest action_digest = 2;

  // If set, the client can use this resource name with
  // [ByteStream.Read][google.bytestream.ByteStream.Read] to stream the
  // standard output from the endpoint hosting streamed responses.
  string stdout_stream_name = 3;

  // If set, the client can use this resource name with
  // [ByteStream.Read][google.bytestream.ByteStream.Read] to stream the
  // standard error from the endpoint hosting streamed responses.
  string stderr_stream_name = 4;

  // The client can read this field to view details about the ongoing
  // execution.
  ExecutedActionMetadata partial_execution_metadata = 5;
}
1561
// A request message for
// [WaitExecution][build.bazel.remote.execution.v2.Execution.WaitExecution].
message WaitExecutionRequest {
  // The name of the [Operation][google.longrunning.Operation]
  // returned by [Execute][build.bazel.remote.execution.v2.Execution.Execute],
  // identifying the execution to wait on.
  string name = 1;
}
1569
// A request message for
// [ActionCache.GetActionResult][build.bazel.remote.execution.v2.ActionCache.GetActionResult].
message GetActionResultRequest {
  // The instance of the execution system to operate against. A server may
  // support multiple instances of the execution system (with their own workers,
  // storage, caches, etc.). The server MAY require use of this field to select
  // between them in an implementation-defined fashion, otherwise it can be
  // omitted.
  string instance_name = 1;

  // The digest of the [Action][build.bazel.remote.execution.v2.Action]
  // whose result is requested.
  Digest action_digest = 2;

  // A hint to the server to request inlining stdout in the
  // [ActionResult][build.bazel.remote.execution.v2.ActionResult] message.
  // Note that the server MAY omit inlining, even if requested.
  bool inline_stdout = 3;

  // A hint to the server to request inlining stderr in the
  // [ActionResult][build.bazel.remote.execution.v2.ActionResult] message.
  // Note that the server MAY omit inlining, even if requested.
  bool inline_stderr = 4;

  // A hint to the server to inline the contents of the listed output files.
  // Each path needs to exactly match one file path in either `output_paths` or
  // `output_files` (DEPRECATED since v2.1) in the
  // [Command][build.bazel.remote.execution.v2.Command] message.
  repeated string inline_output_files = 5;

  // The digest function that was used to compute the action digest.
  //
  // If the digest function used is one of MD5, MURMUR3, SHA1, SHA256,
  // SHA384, SHA512, or VSO, the client MAY leave this field unset. In
  // that case the server SHOULD infer the digest function using the
  // length of the action digest hash and the digest functions announced
  // in the server's capabilities.
  DigestFunction.Value digest_function = 6;
}
1607
// A request message for
// [ActionCache.UpdateActionResult][build.bazel.remote.execution.v2.ActionCache.UpdateActionResult].
message UpdateActionResultRequest {
  // The instance of the execution system to operate against. A server may
  // support multiple instances of the execution system (with their own workers,
  // storage, caches, etc.). The server MAY require use of this field to select
  // between them in an implementation-defined fashion, otherwise it can be
  // omitted.
  string instance_name = 1;

  // The digest of the [Action][build.bazel.remote.execution.v2.Action]
  // whose result is being uploaded.
  Digest action_digest = 2;

  // The [ActionResult][build.bazel.remote.execution.v2.ActionResult]
  // to store in the cache.
  // NOTE(review): blobs referenced by this result presumably must already be
  // present in the CAS -- confirm against the ActionCache service definition.
  ActionResult action_result = 3;

  // An optional policy for the results of this execution in the remote cache.
  // The server will have a default policy if this is not provided.
  // This may be applied to both the ActionResult and the associated blobs.
  ResultsCachePolicy results_cache_policy = 4;

  // The digest function that was used to compute the action digest.
  //
  // If the digest function used is one of MD5, MURMUR3, SHA1, SHA256,
  // SHA384, SHA512, or VSO, the client MAY leave this field unset. In
  // that case the server SHOULD infer the digest function using the
  // length of the action digest hash and the digest functions announced
  // in the server's capabilities.
  DigestFunction.Value digest_function = 5;
}
1640
// A request message for
// [ContentAddressableStorage.FindMissingBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.FindMissingBlobs].
message FindMissingBlobsRequest {
  // The instance of the execution system to operate against. A server may
  // support multiple instances of the execution system (with their own workers,
  // storage, caches, etc.). The server MAY require use of this field to select
  // between them in an implementation-defined fashion, otherwise it can be
  // omitted.
  string instance_name = 1;

  // A list of the blobs to check. All digests MUST use the same digest
  // function. The server responds with the subset of these digests that
  // are not present in the storage.
  repeated Digest blob_digests = 2;

  // The digest function of the blobs whose existence is checked.
  //
  // If the digest function used is one of MD5, MURMUR3, SHA1, SHA256,
  // SHA384, SHA512, or VSO, the client MAY leave this field unset. In
  // that case the server SHOULD infer the digest function using the
  // length of the blob digest hashes and the digest functions announced
  // in the server's capabilities.
  DigestFunction.Value digest_function = 3;
}
1664
// A response message for
// [ContentAddressableStorage.FindMissingBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.FindMissingBlobs].
message FindMissingBlobsResponse {
  // A list of the requested blobs that are *not* present in the storage.
  repeated Digest missing_blob_digests = 2;
}
1671
// A request message for
// [ContentAddressableStorage.BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs].
message BatchUpdateBlobsRequest {
  // A request corresponding to a single blob that the client wants to upload.
  message Request {
    // The digest of the blob. This MUST be the digest of `data`. All
    // digests MUST use the same digest function.
    // NOTE(review): when `compressor` is set, it appears this should refer to
    // the uncompressed data -- confirm against the Compressor documentation.
    Digest digest = 1;

    // The raw binary data, encoded as specified by `compressor`.
    bytes data = 2;

    // The format of `data`. Must be `IDENTITY`/unspecified, or one of the
    // compressors advertised by the
    // [CacheCapabilities.supported_batch_compressors][build.bazel.remote.execution.v2.CacheCapabilities.supported_batch_compressors]
    // field.
    Compressor.Value compressor = 3;
  }

  // The instance of the execution system to operate against. A server may
  // support multiple instances of the execution system (with their own workers,
  // storage, caches, etc.). The server MAY require use of this field to select
  // between them in an implementation-defined fashion, otherwise it can be
  // omitted.
  string instance_name = 1;

  // The individual upload requests.
  repeated Request requests = 2;

  // The digest function that was used to compute the digests of the
  // blobs being uploaded.
  //
  // If the digest function used is one of MD5, MURMUR3, SHA1, SHA256,
  // SHA384, SHA512, or VSO, the client MAY leave this field unset. In
  // that case the server SHOULD infer the digest function using the
  // length of the blob digest hashes and the digest functions announced
  // in the server's capabilities.
  DigestFunction.Value digest_function = 5;
}
1711
// A response message for
// [ContentAddressableStorage.BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs].
message BatchUpdateBlobsResponse {
  // A response corresponding to a single blob that the client tried to upload.
  message Response {
    // The blob digest to which this response corresponds.
    Digest digest = 1;

    // The result of attempting to upload that blob.
    google.rpc.Status status = 2;
  }

  // The responses to the individual upload requests.
  repeated Response responses = 1;
}
1727
// A request message for
// [ContentAddressableStorage.BatchReadBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchReadBlobs].
message BatchReadBlobsRequest {
  // The instance of the execution system to operate against. A server may
  // support multiple instances of the execution system (with their own workers,
  // storage, caches, etc.). The server MAY require use of this field to select
  // between them in an implementation-defined fashion, otherwise it can be
  // omitted.
  string instance_name = 1;

  // The individual digests of the blobs to read. All digests MUST use the
  // same digest function.
  repeated Digest digests = 2;

  // A list of acceptable encodings for the returned inlined data, in no
  // particular order. `IDENTITY` is always allowed even if not specified here.
  repeated Compressor.Value acceptable_compressors = 3;

  // The digest function of the blobs being requested.
  //
  // If the digest function used is one of MD5, MURMUR3, SHA1, SHA256,
  // SHA384, SHA512, or VSO, the client MAY leave this field unset. In
  // that case the server SHOULD infer the digest function using the
  // length of the blob digest hashes and the digest functions announced
  // in the server's capabilities.
  DigestFunction.Value digest_function = 4;
}
1755
// A response message for
// [ContentAddressableStorage.BatchReadBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchReadBlobs].
message BatchReadBlobsResponse {
  // A response corresponding to a single blob that the client tried to download.
  message Response {
    // The digest to which this response corresponds.
    Digest digest = 1;

    // The raw binary data, encoded as specified by `compressor`.
    bytes data = 2;

    // The format the data is encoded in. MUST be `IDENTITY`/unspecified,
    // or one of the acceptable compressors specified in the `BatchReadBlobsRequest`.
    Compressor.Value compressor = 4;

    // The result of attempting to download that blob.
    google.rpc.Status status = 3;
  }

  // The responses to the requests.
  repeated Response responses = 1;
}
1778
// A request message for
// [ContentAddressableStorage.GetTree][build.bazel.remote.execution.v2.ContentAddressableStorage.GetTree].
message GetTreeRequest {
  // The instance of the execution system to operate against. A server may
  // support multiple instances of the execution system (with their own workers,
  // storage, caches, etc.). The server MAY require use of this field to select
  // between them in an implementation-defined fashion, otherwise it can be
  // omitted.
  string instance_name = 1;

  // The digest of the root, which must be an encoded
  // [Directory][build.bazel.remote.execution.v2.Directory] message
  // stored in the
  // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
  Digest root_digest = 2;

  // A maximum page size to request. If present, the server will return no more
  // than this many items. Regardless of whether a page size is specified, the
  // server may place its own limit on the number of items to be returned and
  // require the client to retrieve more items using a subsequent request.
  int32 page_size = 3;

  // A page token, which must be a value received in a previous
  // [GetTreeResponse][build.bazel.remote.execution.v2.GetTreeResponse].
  // If present, the server will use that token as an offset, returning only
  // that page and the ones that succeed it.
  string page_token = 4;

  // The digest function that was used to compute the digest of the root
  // directory.
  //
  // If the digest function used is one of MD5, MURMUR3, SHA1, SHA256,
  // SHA384, SHA512, or VSO, the client MAY leave this field unset. In
  // that case the server SHOULD infer the digest function using the
  // length of the root digest hash and the digest functions announced
  // in the server's capabilities.
  DigestFunction.Value digest_function = 5;
}
1817
// A response message for
// [ContentAddressableStorage.GetTree][build.bazel.remote.execution.v2.ContentAddressableStorage.GetTree].
message GetTreeResponse {
  // The directories that are descendants of the requested root.
  repeated Directory directories = 1;

  // If present, signifies that there are more results which the client can
  // retrieve by passing this as the page_token in a subsequent
  // [request][build.bazel.remote.execution.v2.GetTreeRequest].
  // If empty, signifies that this is the last page of results.
  string next_page_token = 2;
}
1830
// A request message for
// [Capabilities.GetCapabilities][build.bazel.remote.execution.v2.Capabilities.GetCapabilities].
// The instance name is the only parameter.
message GetCapabilitiesRequest {
  // The instance of the execution system to operate against. A server may
  // support multiple instances of the execution system (with their own workers,
  // storage, caches, etc.). The server MAY require use of this field to select
  // between them in an implementation-defined fashion, otherwise it can be
  // omitted.
  string instance_name = 1;
}
1841
// A response message for
// [Capabilities.GetCapabilities][build.bazel.remote.execution.v2.Capabilities.GetCapabilities].
// NOTE(review): presumably deprecated_api_version <= low_api_version <=
// high_api_version -- confirm whether the spec mandates this ordering.
message ServerCapabilities {
  // Capabilities of the remote cache system.
  CacheCapabilities cache_capabilities = 1;

  // Capabilities of the remote execution system.
  ExecutionCapabilities execution_capabilities = 2;

  // Earliest RE API version supported, including deprecated versions.
  build.bazel.semver.SemVer deprecated_api_version = 3;

  // Earliest non-deprecated RE API version supported.
  build.bazel.semver.SemVer low_api_version = 4;

  // Latest RE API version supported.
  build.bazel.semver.SemVer high_api_version = 5;
}
1860
// The digest function used for converting values into keys for CAS and Action
// Cache. Clients select a function via the `digest_function` field present on
// most request messages (e.g.
// [ExecuteRequest.digest_function][build.bazel.remote.execution.v2.ExecuteRequest.digest_function]).
message DigestFunction {
  enum Value {
    // It is an error for the server to return this value.
    UNKNOWN = 0;

    // The SHA-256 digest function.
    SHA256 = 1;

    // The SHA-1 digest function.
    SHA1 = 2;

    // The MD5 digest function.
    MD5 = 3;

    // The Microsoft "VSO-Hash" paged SHA256 digest function.
    // See https://github.com/microsoft/BuildXL/blob/master/Documentation/Specs/PagedHash.md .
    VSO = 4;

    // The SHA-384 digest function.
    SHA384 = 5;

    // The SHA-512 digest function.
    SHA512 = 6;

    // Murmur3 128-bit digest function, x64 variant. Note that this is not a
    // cryptographic hash function and its collision properties are not strongly guaranteed.
    // See https://github.com/aappleby/smhasher/wiki/MurmurHash3 .
    MURMUR3 = 7;

    // The SHA-256 digest function, modified to use a Merkle tree for
    // large objects. This permits implementations to store large blobs
    // as a decomposed sequence of 2^j sized chunks, where j >= 10,
    // while being able to validate integrity at the chunk level.
    //
    // Furthermore, on systems that do not offer dedicated instructions
    // for computing SHA-256 hashes (e.g., the Intel SHA and ARMv8
    // cryptographic extensions), SHA256TREE hashes can be computed more
    // efficiently than plain SHA-256 hashes by using generic SIMD
    // extensions, such as Intel AVX2 or ARM NEON.
    //
    // SHA256TREE hashes are computed as follows:
    //
    // - For blobs that are 1024 bytes or smaller, the hash is computed
    //   using the regular SHA-256 digest function.
    //
    // - For blobs that are more than 1024 bytes in size, the hash is
    //   computed as follows:
    //
    //   1. The blob is partitioned into a left (leading) and right
    //      (trailing) blob. These blobs have lengths m and n
    //      respectively, where m = 2^k and 0 < n <= m.
    //
    //   2. Hashes of the left and right blob, Hash(left) and
    //      Hash(right) respectively, are computed by recursively
    //      applying the SHA256TREE algorithm.
    //
    //   3. A single invocation is made to the SHA-256 block cipher with
    //      the following parameters:
    //
    //          M = Hash(left) || Hash(right)
    //          H = {
    //              0xcbbb9d5d, 0x629a292a, 0x9159015a, 0x152fecd8,
    //              0x67332667, 0x8eb44a87, 0xdb0c2e0d, 0x47b5481d,
    //          }
    //
    //      The values of H are the leading fractional parts of the
    //      square roots of the 9th to the 16th prime number (23 to 53).
    //      This differs from plain SHA-256, where the first eight prime
    //      numbers (2 to 19) are used, thereby preventing trivial hash
    //      collisions between small and large objects.
    //
    //   4. The hash of the full blob can then be obtained by
    //      concatenating the outputs of the block cipher:
    //
    //          Hash(blob) = a || b || c || d || e || f || g || h
    //
    //      Addition of the original values of H, as normally done
    //      through the use of the Davies-Meyer structure, is not
    //      performed. This isn't necessary, as the block cipher is only
    //      invoked once.
    //
    // Test vectors of this digest function can be found in the
    // accompanying sha256tree_test_vectors.txt file.
    SHA256TREE = 8;

    // The BLAKE3 hash function.
    // See https://github.com/BLAKE3-team/BLAKE3.
    BLAKE3 = 9;
  }
}
1953
// Describes the server/instance capabilities for updating the action cache.
message ActionCacheUpdateCapabilities {
  // True if the server/instance allows clients to write to the action cache
  // (presumably via `ActionCache.UpdateActionResult` — confirm against the
  // ActionCache service definition). When false, clients SHOULD NOT attempt
  // cache-update requests against this server/instance.
  bool update_enabled = 1;
}
1958
// Allowed values for priority in
// [ResultsCachePolicy][build.bazel.remote.execution.v2.ResultsCachePolicy] and
// [ExecutionPolicy][build.bazel.remote.execution.v2.ExecutionPolicy].
// Used for querying both cache and execution valid priority ranges.
message PriorityCapabilities {
  // Supported range of priorities, including boundaries.
  message PriorityRange {
    // The minimum numeric value for this priority range, which represents the
    // most urgent task or longest retained item.
    int32 min_priority = 1;
    // The maximum numeric value for this priority range, which represents the
    // least urgent task or shortest retained item.
    int32 max_priority = 2;
  }
  // The set of priority ranges the server/instance accepts.
  repeated PriorityRange priorities = 1;
}
1975
// Describes how the server treats absolute symlink targets.
// This message is a wrapper for the nested `Value` enum, referenced as
// `SymlinkAbsolutePathStrategy.Value` by
// [CacheCapabilities][build.bazel.remote.execution.v2.CacheCapabilities].
message SymlinkAbsolutePathStrategy {
  enum Value {
    // Invalid value. Servers SHOULD advertise a concrete strategy instead.
    UNKNOWN = 0;

    // Server will return an `INVALID_ARGUMENT` on input symlinks with absolute
    // targets.
    // If an action tries to create an output symlink with an absolute target, a
    // `FAILED_PRECONDITION` will be returned.
    DISALLOWED = 1;

    // Server will allow symlink targets to escape the input root tree, possibly
    // resulting in non-hermetic builds.
    ALLOWED = 2;
  }
}
1993
// Compression formats which may be supported.
// This message is a wrapper for the nested `Value` enum, referenced as
// `Compressor.Value` by
// [CacheCapabilities][build.bazel.remote.execution.v2.CacheCapabilities].
message Compressor {
  enum Value {
    // No compression. Servers and clients MUST always support this, and do
    // not need to advertise it.
    IDENTITY = 0;

    // Zstandard compression (RFC 8878).
    ZSTD = 1;

    // RFC 1951 Deflate. This format is identical to what is used by ZIP
    // files. Headers such as the one generated by gzip are not
    // included.
    //
    // It is advised to use algorithms such as Zstandard instead, as
    // those are faster and/or provide a better compression ratio.
    DEFLATE = 2;

    // Brotli compression (RFC 7932).
    BROTLI = 3;
  }
}
2016
// Capabilities of the remote cache system.
message CacheCapabilities {
  // All the digest functions supported by the remote cache.
  // Remote cache may support multiple digest functions simultaneously.
  repeated DigestFunction.Value digest_functions = 1;

  // Capabilities for updating the action cache.
  ActionCacheUpdateCapabilities action_cache_update_capabilities = 2;

  // Supported cache priority range for both CAS and ActionCache.
  PriorityCapabilities cache_priority_capabilities = 3;

  // Maximum total size in bytes of blobs to be uploaded/downloaded using
  // batch methods. A value of 0 means no limit is set, although
  // in practice there will always be a message size limitation
  // of the protocol in use, e.g. gRPC.
  int64 max_batch_total_size_bytes = 4;

  // Whether absolute symlink targets are supported.
  SymlinkAbsolutePathStrategy.Value symlink_absolute_path_strategy = 5;

  // Compressors supported by the "compressed-blobs" bytestream resources.
  // Servers MUST support identity/no-compression, even if it is not listed
  // here.
  //
  // Note that this does not imply which, if any, compressors are supported
  // by the server at the gRPC level.
  repeated Compressor.Value supported_compressors = 6;

  // Compressors supported for inlined data in
  // [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs]
  // requests.
  repeated Compressor.Value supported_batch_update_compressors = 7;
}
2051
// Capabilities of the remote execution system.
message ExecutionCapabilities {
  // Legacy field for indicating which digest function is supported by the
  // remote execution system. It MUST be set to a value other than UNKNOWN.
  // Implementations should consider the repeated digest_functions field
  // first, falling back to this singular field if digest_functions is unset.
  DigestFunction.Value digest_function = 1;

  // Whether remote execution is enabled for the particular server/instance.
  bool exec_enabled = 2;

  // Supported execution priority range.
  PriorityCapabilities execution_priority_capabilities = 3;

  // Names of the node properties the server/instance supports.
  repeated string supported_node_properties = 4;

  // All the digest functions supported by the remote execution system.
  // If this field is set, it MUST also contain digest_function.
  //
  // Even if the remote execution system announces support for multiple
  // digest functions, individual execution requests may only reference
  // CAS objects using a single digest function. For example, it is not
  // permitted to execute actions having both MD5 and SHA-256 hashed
  // files in their input root.
  //
  // The CAS objects referenced by action results generated by the
  // remote execution system MUST use the same digest function as the
  // one used to construct the action.
  repeated DigestFunction.Value digest_functions = 5;
}
2083
// Details for the tool used to call the API.
// Carried inside [RequestMetadata][build.bazel.remote.execution.v2.RequestMetadata]
// to identify the client, e.g. for server-side logging.
message ToolDetails {
  // Name of the tool, e.g. "bazel".
  string tool_name = 1;

  // Version of the tool used for the request, e.g. "5.0.3".
  // No particular version-string format is mandated here.
  string tool_version = 2;
}
2092
// Optional metadata to attach to any RPC request to tell the server about an
// external context of the request. The server may use this for logging or
// other purposes. To use it, the client attaches the header to the call using
// the canonical proto serialization:
//
// * name: `build.bazel.remote.execution.v2.requestmetadata-bin`
// * contents: the base64-encoded binary `RequestMetadata` message.
//
// Note: the gRPC library serializes binary headers encoded in base64 by
// default (https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests).
// Therefore, if the gRPC library is used to pass/retrieve this
// metadata, the user may ignore the base64 encoding and assume it is simply
// serialized as a binary message.
message RequestMetadata {
  // The details for the tool invoking the requests.
  ToolDetails tool_details = 1;

  // An identifier that ties multiple requests to the same action.
  // For example, multiple requests to the CAS, Action Cache, and Execution
  // API are used in order to compile foo.cc.
  string action_id = 2;

  // An identifier that ties multiple actions together to a final result.
  // For example, multiple actions are required to build and run foo_test.
  string tool_invocation_id = 3;

  // An identifier to tie multiple tool invocations together. For example,
  // runs of foo_test, bar_test and baz_test on a post-submit of a given patch.
  string correlated_invocations_id = 4;

  // A brief description of the kind of action, for example, CppCompile or
  // GoLink. There is no standard agreed set of values for this, and they are
  // expected to vary between different client tools.
  string action_mnemonic = 5;

  // An identifier for the target which produced this action.
  // No guarantees are made around how many actions may relate to a single
  // target.
  string target_id = 6;

  // An identifier for the configuration in which the target was built,
  // e.g. for differentiating building host tools or different target
  // platforms. There is no expectation that this value will have any
  // particular structure, or equality across invocations, though some client
  // tools may offer these guarantees.
  string configuration_id = 7;
}
2136