diff options
4 files changed, 20 insertions, 20 deletions
diff --git a/tensorflow/compiler/xla/client/compile_only_client.cc b/tensorflow/compiler/xla/client/compile_only_client.cc index c7e2c4367b..59662c95ac 100644 --- a/tensorflow/compiler/xla/client/compile_only_client.cc +++ b/tensorflow/compiler/xla/client/compile_only_client.cc @@ -39,16 +39,15 @@ CompileOnlyClient::CompileAheadOfTime( return compiler_service_->CompileAheadOfTime(service_instances, options); } -int64 CompileOnlyClient::PointerSizeForTriple( - tensorflow::StringPiece target_triple) { - llvm::Triple triple(llvm::Triple::normalize( - llvm::StringRef(target_triple.data(), target_triple.size()))); - if (triple.isArch64Bit()) { +int64 CompileOnlyClient::PointerSizeForTriple(tensorflow::StringPiece triple) { + llvm::Triple llvm_triple( + llvm::Triple::normalize(llvm::StringRef(triple.data(), triple.size()))); + if (llvm_triple.isArch64Bit()) { return 8; - } else if (triple.isArch32Bit()) { + } else if (llvm_triple.isArch32Bit()) { return 4; } else { - CHECK(triple.isArch16Bit()); + CHECK(llvm_triple.isArch16Bit()); return 2; } } diff --git a/tensorflow/compiler/xla/client/computation_builder.cc b/tensorflow/compiler/xla/client/computation_builder.cc index 2a6e02649d..4afef6e448 100644 --- a/tensorflow/compiler/xla/client/computation_builder.cc +++ b/tensorflow/compiler/xla/client/computation_builder.cc @@ -408,7 +408,7 @@ ComputationDataHandle ComputationBuilder::Reshape( ComputationDataHandle ComputationBuilder::Collapse( const ComputationDataHandle& operand, - tensorflow::gtl::ArraySlice<int64> dims_to_collapse) { + tensorflow::gtl::ArraySlice<int64> dimensions) { if (!first_error_.ok()) { return ComputationDataHandle(); } @@ -416,8 +416,8 @@ ComputationDataHandle ComputationBuilder::Collapse( // Don't support out-of-order collapse here. // Checks that the collapsed dimensions are in order and consecutive. 
for (tensorflow::gtl::ArraySlice<int64>::size_type i = 1; - i < dims_to_collapse.size(); ++i) { - if (dims_to_collapse[i] - 1 != dims_to_collapse[i - 1]) { + i < dimensions.size(); ++i) { + if (dimensions[i] - 1 != dimensions[i - 1]) { NoteError(InvalidArgument( "Collapsed dimensions are not in order and consecutive.")); return ComputationDataHandle(); @@ -434,9 +434,9 @@ ComputationDataHandle ComputationBuilder::Collapse( VLOG(3) << "original shape: " << ShapeUtil::HumanString(*original_shape); VLOG(3) << "dims to collapse: " - << tensorflow::str_util::Join(dims_to_collapse, ","); + << tensorflow::str_util::Join(dimensions, ","); - if (dims_to_collapse.size() <= 1) { + if (dimensions.size() <= 1) { // Not collapsing anything, trivially we can return the operand versus // enqueueing a trivial reshape. return operand; @@ -444,7 +444,7 @@ ComputationDataHandle ComputationBuilder::Collapse( std::vector<int64> new_sizes; for (int i = 0; i < ShapeUtil::Rank(*original_shape); ++i) { - if (i <= dims_to_collapse.front() || i > dims_to_collapse.back()) { + if (i <= dimensions.front() || i > dimensions.back()) { new_sizes.push_back(original_shape->dimensions(i)); } else { new_sizes.back() *= original_shape->dimensions(i); @@ -753,13 +753,13 @@ ComputationDataHandle ComputationBuilder::Infeed(const Shape& shape, } void ComputationBuilder::Outfeed(const ComputationDataHandle& operand, - const Shape& shape, + const Shape& shape_with_layout, const string& outfeed_config) { OpRequest op_request; OutfeedRequest* request = op_request.mutable_outfeed_request(); request->set_outfeed_config(outfeed_config); *request->mutable_operand() = operand; - *request->mutable_shape() = shape; + *request->mutable_shape() = shape_with_layout; RunOpAndNoteError(&op_request); } @@ -1382,15 +1382,16 @@ ComputationDataHandle ComputationBuilder::BatchNormInference( ComputationDataHandle ComputationBuilder::BatchNormGrad( const ComputationDataHandle& operand, const ComputationDataHandle& scale, - const 
ComputationDataHandle& mean, const ComputationDataHandle& var, + const ComputationDataHandle& batch_mean, + const ComputationDataHandle& batch_var, const ComputationDataHandle& grad_output, float epsilon, int64 feature_index) { OpRequest op_request; BatchNormGradRequest* request = op_request.mutable_batch_norm_grad_request(); *request->mutable_operand() = operand; *request->mutable_scale() = scale; - *request->mutable_mean() = mean; - *request->mutable_variance() = var; + *request->mutable_mean() = batch_mean; + *request->mutable_variance() = batch_var; *request->mutable_grad_output() = grad_output; request->set_epsilon(epsilon); request->set_feature_index(feature_index); diff --git a/tensorflow/compiler/xla/client/computation_builder.h b/tensorflow/compiler/xla/client/computation_builder.h index 377b671639..e085fcb3b1 100644 --- a/tensorflow/compiler/xla/client/computation_builder.h +++ b/tensorflow/compiler/xla/client/computation_builder.h @@ -872,7 +872,7 @@ class ComputationBuilder { Window* window); // Internal helper method that does the building for an arbitrary unary op. - ComputationDataHandle UnaryOp(UnaryOperation binop, + ComputationDataHandle UnaryOp(UnaryOperation unop, const ComputationDataHandle& operand); // Internal helper method that does the building for an arbitrary binary op. diff --git a/tensorflow/compiler/xla/client/local_client.h b/tensorflow/compiler/xla/client/local_client.h index b52a30f5a0..de0ed13c43 100644 --- a/tensorflow/compiler/xla/client/local_client.h +++ b/tensorflow/compiler/xla/client/local_client.h @@ -69,7 +69,7 @@ class LocalExecutable { // of the computation. tensorflow::Status ValidateExecutionOptions( const tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments, - const ExecutableRunOptions& options, const Backend& backend); + const ExecutableRunOptions& run_options, const Backend& backend); // Records the computation in a SessionModule proto with the arguments used to // invoke it, and the result. 
Enabled by flag: --xla_dump_executions_to.