about | summary | refs | log | tree | commit | diff | homepage
path: root/tensorflow/compiler/jit
diff options
context:
space:
mode:
author: Sanjoy Das <sanjoy@google.com>  2018-08-24 11:38:02 -0700
committer: TensorFlower Gardener <gardener@tensorflow.org>  2018-08-24 11:51:12 -0700
commit 5968329fba2801e48758b1fc44f500572aa1f952 (patch)
tree 404b4698a36dc2ecfbbeddde5387a12f815ff466 /tensorflow/compiler/jit
parent 0fc04d33486fb4667666048750ef113fb9c92829 (diff)
Make compile_options a mandatory const-ref argument.
PiperOrigin-RevId: 210130976
Diffstat (limited to 'tensorflow/compiler/jit')
-rw-r--r--  tensorflow/compiler/jit/kernels/xla_launch_op.cc      2
-rw-r--r--  tensorflow/compiler/jit/xla_compilation_cache.cc     15
-rw-r--r--  tensorflow/compiler/jit/xla_compilation_cache.h       6
-rw-r--r--  tensorflow/compiler/jit/xla_compile_on_demand_op.cc   2
4 files changed, 12 insertions, 13 deletions
diff --git a/tensorflow/compiler/jit/kernels/xla_launch_op.cc b/tensorflow/compiler/jit/kernels/xla_launch_op.cc
index ddb27a38ae..fde4135bf7 100644
--- a/tensorflow/compiler/jit/kernels/xla_launch_op.cc
+++ b/tensorflow/compiler/jit/kernels/xla_launch_op.cc
@@ -187,7 +187,7 @@ void XlaLocalLaunchBase::Compute(OpKernelContext* ctx) {
OP_REQUIRES_OK(
ctx, cache->Compile(options, function_, constant_args, variables, ctx,
- &kernel, &executable, &compile_options));
+ &kernel, &executable, compile_options));
VLOG(1) << "Executing XLA Computation...";
diff --git a/tensorflow/compiler/jit/xla_compilation_cache.cc b/tensorflow/compiler/jit/xla_compilation_cache.cc
index 7140d47a94..ef6b0e67d3 100644
--- a/tensorflow/compiler/jit/xla_compilation_cache.cc
+++ b/tensorflow/compiler/jit/xla_compilation_cache.cc
@@ -230,7 +230,7 @@ Status XlaCompilationCache::Compile(
const std::map<int, OptionalTensor>& variable_args, OpKernelContext* ctx,
const XlaCompiler::CompilationResult** compilation_result,
xla::LocalExecutable** executable,
- const XlaCompiler::CompileOptions* compile_options) {
+ const XlaCompiler::CompileOptions& compile_options) {
return CompileImpl(options, function, constant_args, variable_args, ctx,
compilation_result, executable, compile_options, false);
}
@@ -241,7 +241,7 @@ Status XlaCompilationCache::CompileSingleOp(
const std::map<int, OptionalTensor>& variable_args, OpKernelContext* ctx,
const XlaCompiler::CompilationResult** compilation_result,
xla::LocalExecutable** executable,
- const XlaCompiler::CompileOptions* compile_options) {
+ const XlaCompiler::CompileOptions& compile_options) {
const NodeDef& def = ctx->op_kernel().def();
NameAttrList name;
name.set_name(def.op());
@@ -256,7 +256,7 @@ Status XlaCompilationCache::CompileImpl(
const std::map<int, OptionalTensor>& variable_args, OpKernelContext* ctx,
const XlaCompiler::CompilationResult** compilation_result,
xla::LocalExecutable** executable,
- const XlaCompiler::CompileOptions* compile_options,
+ const XlaCompiler::CompileOptions& compile_options,
bool compile_single_op) {
CHECK_NE(executable, nullptr);
VLOG(1) << "XlaCompilationCache::Compile " << DebugString();
@@ -324,13 +324,12 @@ Status XlaCompilationCache::CompileImpl(
entry->compiled = true;
if (compile_single_op) {
- entry->compilation_status = compiler.CompileSingleOp(
- compile_options ? *compile_options : XlaCompiler::CompileOptions(),
- signature.name, ctx, args, &entry->compilation_result);
+ entry->compilation_status =
+ compiler.CompileSingleOp(compile_options, signature.name, ctx, args,
+ &entry->compilation_result);
} else {
entry->compilation_status = compiler.CompileFunction(
- compile_options ? *compile_options : XlaCompiler::CompileOptions(),
- function, args, &entry->compilation_result);
+ compile_options, function, args, &entry->compilation_result);
}
TF_RETURN_IF_ERROR(entry->compilation_status);
CHECK_EQ(entry->executable.get(), nullptr);
diff --git a/tensorflow/compiler/jit/xla_compilation_cache.h b/tensorflow/compiler/jit/xla_compilation_cache.h
index fc5f008f4f..10ad87e38c 100644
--- a/tensorflow/compiler/jit/xla_compilation_cache.h
+++ b/tensorflow/compiler/jit/xla_compilation_cache.h
@@ -70,7 +70,7 @@ class XlaCompilationCache : public ResourceBase {
OpKernelContext* ctx,
const XlaCompiler::CompilationResult** compilation_result,
xla::LocalExecutable** executable,
- const XlaCompiler::CompileOptions* compile_options);
+ const XlaCompiler::CompileOptions& compile_options);
// As above, but calls XlaCompiler::CompileSingleOp instead of
// XlaCompiler::CompileFunction.
@@ -80,7 +80,7 @@ class XlaCompilationCache : public ResourceBase {
const std::map<int, OptionalTensor>& variable_args, OpKernelContext* ctx,
const XlaCompiler::CompilationResult** compilation_result,
xla::LocalExecutable** executable,
- const XlaCompiler::CompileOptions* compile_options);
+ const XlaCompiler::CompileOptions& compile_options);
xla::LocalClient* client() const { return client_; }
const DeviceType& device_type() const { return device_type_; }
@@ -96,7 +96,7 @@ class XlaCompilationCache : public ResourceBase {
OpKernelContext* ctx,
const XlaCompiler::CompilationResult** compilation_result,
xla::LocalExecutable** executable,
- const XlaCompiler::CompileOptions* compile_options,
+ const XlaCompiler::CompileOptions& compile_options,
bool compile_single_op);
// Takes `result` which has been compiled from a Tensorflow subgraph to a
diff --git a/tensorflow/compiler/jit/xla_compile_on_demand_op.cc b/tensorflow/compiler/jit/xla_compile_on_demand_op.cc
index dd84fb34c1..3ba48e8c31 100644
--- a/tensorflow/compiler/jit/xla_compile_on_demand_op.cc
+++ b/tensorflow/compiler/jit/xla_compile_on_demand_op.cc
@@ -177,7 +177,7 @@ Status XlaCompileOnDemandOp::Compile(
std::map<int, OptionalTensor> variable_args = GetVariables(ctx);
return cache->CompileSingleOp(options, constant_arguments, variable_args, ctx,
- result, executable, &compile_options);
+ result, executable, compile_options);
}
void XlaCompileOnDemandOp::Compute(OpKernelContext* ctx) {