path: root/tensorflow/compiler/xrt/xrt.proto
syntax = "proto3";

package xrt;

import "tensorflow/compiler/tf2xla/host_compute_metadata.proto";
import "tensorflow/compiler/xla/xla_data.proto";
import "tensorflow/compiler/xla/service/hlo.proto";

// Options for an XLA compilation.
message XLAComputationConfig {
  // The number of replicas the computation will be run on. If this is
  // default (0) it is interpreted as 1.
  int32 num_replicas = 1;
  // The number of "model-parallel" cores per replica. If this is
  // default (0) it is interpreted as 1.
  int32 num_cores_per_replica = 2;
  // Optional metadata about host sends and recvs.
  tensorflow.tf2xla.HostComputeMetadata host_compute_metadata = 3;

  // The arg/result shapes for the whole computation.
  xla.ProgramShape program_shape = 4;
  // The arg/result shapes for each core of a model-parallel
  // computation. per_core_program_shape is optional for a
  // single-core computation.
  repeated xla.ProgramShape per_core_program_shape = 5;
}
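
// Illustrative sketch only (not part of the schema): in proto text format, a
// config for one replica split across two model-parallel cores might look
// like the following; program_shape and per_core_program_shape are
// xla.ProgramShape messages from xla_data.proto and are elided here.
//
//   num_replicas: 1
//   num_cores_per_replica: 2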

// Options and XLA computation for a compilation.
message XLAComputation {
  XLAComputationConfig config = 1;
  xla.HloSnapshot hlo_snapshot = 2;
}

// Literal to allocate space for, and transfer to, device memory.
message XLAAllocation {
  int32 device_ordinal = 1;
  xla.LiteralProto value = 2;
}
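
// Illustrative sketch only: an allocation on device 0 in proto text format.
// The value field is an xla.LiteralProto from xla_data.proto holding the data
// to transfer; its contents are elided here.
//
//   device_ordinal: 0
//   value { ... }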

// Node in a tree describing a tuple constructed from input handles. A
// node is an internal node if tuples is non-empty, in which case
// input_index and release_input_handle are ignored. Otherwise a node
// is a leaf node. Each leaf XLATupleNode holds the index of an input
// whose handle will be grafted onto the output tuple at that
// location. If release_input_handle is true, that input handle will
// be released and become invalid. Inputs may be repeated, in which
// case leaves of the output tuple will alias. If an input is
// repeated, release_input_handle must be false for every leaf where
// that input appears.
//
// For example, if input 0 has shape {} and input 1 has shape {2,3}
// then the XLATupleNode with structure {1,{0,1}} corresponds to a
// tuple with shape {{2,3},{{},{2,3}}}.
message XLATupleNode {
  int32 input_index = 1;
  bool release_input_handle = 2;
  repeated XLATupleNode tuples = 3;
}
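
// Illustrative sketch only: the {1,{0,1}} example above written in proto text
// format. The root is an internal node whose first child is a leaf for input
// 1 and whose second child is a nested tuple of inputs 0 and 1.
//
//   tuples { input_index: 1 }
//   tuples {
//     tuples { input_index: 0 }
//     tuples { input_index: 1 }
//   }
//
// Because input 1 is repeated, release_input_handle must remain false (the
// default) on both leaves that reference it.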

// Options for an XLA execution.
message XRTExecutionConfig {
  // Local device to run on. This is present because the execute Op
  // may be placed on a device such as CPU or TPU_SYSTEM that
  // logically manages multiple cores.
  int32 device_ordinal = 1;
  // Which model-parallel computation to run from the compiled bundle.
  int32 core_index_in_replica = 2;
  // Optional key to disambiguate between executions. This is only
  // needed if multiple host send/recvs may be outstanding
  // concurrently with executions.
  string execution_instance_key = 3;
  // If non-zero, rng_seed to reset the core with.
  uint32 rng_seed = 4;
  // If true, release allocation handles on the inputs after running.
  bool release_input_handles = 5;
  // If true, release the handle to the computation after running.
  bool release_compilation_handle = 6;
}
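
// Illustrative sketch only: an execution config in proto text format that
// runs core 0 of local device 0 with a fixed RNG seed, releasing the input
// allocations but keeping the compiled computation for reuse.
//
//   device_ordinal: 0
//   core_index_in_replica: 0
//   rng_seed: 42
//   release_input_handles: true
//   release_compilation_handle: false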