about summary refs log tree commit diff homepage
path: root/tensorflow/compiler/xla/service/service.cc
diff options
context:
space:
mode:
authorGravatar A. Unique TensorFlower <gardener@tensorflow.org>2018-10-09 16:53:29 -0700
committerGravatar TensorFlower Gardener <gardener@tensorflow.org>2018-10-09 17:06:10 -0700
commitbb5fc614a4a358b350ef8dd19cb7010760fa9b29 (patch)
tree43a745ffdc409d0ff4660342d6a62735ac366a13 /tensorflow/compiler/xla/service/service.cc
parent65b7d0b2f84c334327a295bf41bc06c7f6b8ffe5 (diff)
[XLA] Cleanup: Make AllocationTracker::Resolve const.
So that when resolving some global data, we don't have to worry whether "Resolve" is going to mutate the real data.

PiperOrigin-RevId: 216448145
Diffstat (limited to 'tensorflow/compiler/xla/service/service.cc')
-rw-r--r-- tensorflow/compiler/xla/service/service.cc | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/tensorflow/compiler/xla/service/service.cc b/tensorflow/compiler/xla/service/service.cc
index b27a92f2a0..084df17951 100644
--- a/tensorflow/compiler/xla/service/service.cc
+++ b/tensorflow/compiler/xla/service/service.cc
@@ -207,7 +207,7 @@ Status Service::ValidateResultShape(const Shape& client_shape,
StatusOr<std::vector<std::vector<const ShapedBuffer*>>>
Service::ResolveAndValidateArguments(
absl::Span<const GlobalDataHandle* const> arguments,
- absl::Span<se::StreamExecutor* const> stream_executors) {
+ absl::Span<se::StreamExecutor* const> stream_executors) const {
CHECK_EQ(options_.number_of_replicas(), stream_executors.size());
std::vector<std::vector<const ShapedBuffer*>> replicated_arguments;
replicated_arguments.resize(options_.number_of_replicas());
@@ -590,7 +590,7 @@ StatusOr<std::vector<se::StreamExecutor*>> Service::GetExecutors(
StatusOr<std::vector<std::vector<const ShapedBuffer*>>> Service::GetArguments(
const ExecutionOptions& execution_options,
- absl::Span<const GlobalDataHandle* const> arguments) {
+ absl::Span<const GlobalDataHandle* const> arguments) const {
// Resolve the allocations for the arguments of the computation, and create
// a vector of device memory offsets for the arguments from the allocations.
// In the case of partitioned computations, assume all arguments go on the