diff options
-rw-r--r-- tensorflow/compiler/xla/service/compile_only_service.cc | 4
-rw-r--r-- tensorflow/compiler/xla/service/local_service.cc | 4
-rw-r--r-- tensorflow/compiler/xla/service/service.cc | 11
-rw-r--r-- tensorflow/compiler/xla/service/service.h | 9
4 files changed, 2 insertions, 26 deletions
diff --git a/tensorflow/compiler/xla/service/compile_only_service.cc b/tensorflow/compiler/xla/service/compile_only_service.cc index cfcf2744fb..b0867c47a1 100644 --- a/tensorflow/compiler/xla/service/compile_only_service.cc +++ b/tensorflow/compiler/xla/service/compile_only_service.cc @@ -62,9 +62,7 @@ CompileOnlyService::CompileOnlyService( std::unique_ptr<Backend> compute_constant_backend) : Service(options, /*backend=*/nullptr, std::move(compute_constant_backend)), - compiler_(compiler) { - runs_in_client_process_ = true; -} + compiler_(compiler) {} StatusOr<std::vector<std::unique_ptr<AotCompilationResult>>> CompileOnlyService::CompileAheadOfTime( diff --git a/tensorflow/compiler/xla/service/local_service.cc b/tensorflow/compiler/xla/service/local_service.cc index 05a1028a23..25588a6fb8 100644 --- a/tensorflow/compiler/xla/service/local_service.cc +++ b/tensorflow/compiler/xla/service/local_service.cc @@ -71,9 +71,7 @@ LocalService::LocalService(const ServiceOptions& options, std::unique_ptr<Backend> execute_backend, std::unique_ptr<Backend> compute_constant_backend) : Service(options, std::move(execute_backend), - std::move(compute_constant_backend)) { - runs_in_client_process_ = true; -} + std::move(compute_constant_backend)) {} namespace { // Returns the space required to allocate a shape. 
diff --git a/tensorflow/compiler/xla/service/service.cc b/tensorflow/compiler/xla/service/service.cc index 31740757ab..68441ef17f 100644 --- a/tensorflow/compiler/xla/service/service.cc +++ b/tensorflow/compiler/xla/service/service.cc @@ -1207,17 +1207,6 @@ tensorflow::Status Service::GetComputationStats( return tensorflow::Status::OK(); } -tensorflow::Status Service::CheckRunsInClientProcess( - const string& method_name) const { - if (runs_in_client_process_) { - return tensorflow::Status::OK(); - } else { - return FailedPrecondition( - "%s only supported if service runs in the same process as the client", - method_name.c_str()); - } -} - template <typename RequestT, typename ResponseT> tensorflow::Status Service::AddInstruction( const RequestT* arg, ResponseT* result, diff --git a/tensorflow/compiler/xla/service/service.h b/tensorflow/compiler/xla/service/service.h index 14e8a676a1..4ed89ca04a 100644 --- a/tensorflow/compiler/xla/service/service.h +++ b/tensorflow/compiler/xla/service/service.h @@ -333,12 +333,6 @@ class Service : public ServiceInterface { const std::function<StatusOr<ComputationDataHandle>(UserComputation*)>& adder); - // If the service is running in the client process - // (runs_in_client_process_ is true) then return - // tensorflow::Status::OK. Otherwise return an appropriate error - // status with the given method name. Used for "InProcess" methods. - tensorflow::Status CheckRunsInClientProcess(const string& method_name) const; - // Convenience function which checks whether the given shape_with_layout // (presumably passed by the client to set the result layout) is valid for the // given computation result shape. @@ -380,9 +374,6 @@ class Service : public ServiceInterface { // Backend to use when executing ComputeConstant. std::unique_ptr<Backend> compute_constant_backend_; - // Whether the service runs in the same process as the client. - bool runs_in_client_process_ = false; - TF_DISALLOW_COPY_AND_ASSIGN(Service); };