author     Justin Lebar <jlebar@google.com>                2018-08-27 14:50:49 -0700
committer  TensorFlower Gardener <gardener@tensorflow.org>  2018-08-27 14:55:29 -0700
commit     d57f5a82025702d573d478091dc9c385adf53c09
tree       e0a9f6b00cd6846cf71a79e02779e1e1f1f9cfc5  /tensorflow/compiler/xla/service
parent     91f33732cb15f51eaf6ec86c82e42a74e351e061
[XLA] Switch to absl::StrFormat.
Unlike Printf, StrFormat does not require type-length qualifiers, e.g. %z or %ll, nor does it require that you call c_str() to print strings. So these are fixed up here as well.

PiperOrigin-RevId: 210435915
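For readers unfamiliar with the API, here is a minimal sketch (not part of this commit) of the conversion pattern applied throughout the diff below; the function name and example values are hypothetical:

#include <cstdint>
#include <string>

#include "absl/strings/str_format.h"

std::string FormatExamples() {
  // Hypothetical values standing in for the kinds of arguments seen in the diff.
  int64_t handle = 42;
  std::string platform_name = "Host";

  // Old style (tensorflow::strings::Printf / Appendf): an int64 argument
  // needs the %lld length qualifier and a std::string has to go through
  // c_str() before it can be printed with %s, e.g.
  //   Printf("global data handle %lld on platform %s",
  //          handle, platform_name.c_str());

  // New style: absl::StrFormat's %d accepts any integral width and %s takes
  // std::string (or string_view) directly, so no c_str() is needed.
  std::string out =
      absl::StrFormat("global data handle %d on platform %s", handle,
                      platform_name);

  // absl::StrAppendFormat appends a formatted result to an existing string,
  // replacing tensorflow::strings::Appendf in the same way.
  absl::StrAppendFormat(&out, "; size %d bytes", int64_t{128});
  return out;
}
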
Diffstat (limited to 'tensorflow/compiler/xla/service')
-rw-r--r--  tensorflow/compiler/xla/service/BUILD | 21
-rw-r--r--  tensorflow/compiler/xla/service/allocation_tracker.cc | 13
-rw-r--r--  tensorflow/compiler/xla/service/backend.cc | 2
-rw-r--r--  tensorflow/compiler/xla/service/buffer_assignment.cc | 60
-rw-r--r--  tensorflow/compiler/xla/service/buffer_liveness.cc | 16
-rw-r--r--  tensorflow/compiler/xla/service/call_graph.cc | 20
-rw-r--r--  tensorflow/compiler/xla/service/call_inliner.cc | 2
-rw-r--r--  tensorflow/compiler/xla/service/channel_tracker.cc | 12
-rw-r--r--  tensorflow/compiler/xla/service/compiler.cc | 2
-rw-r--r--  tensorflow/compiler/xla/service/computation_placer.cc | 2
-rw-r--r--  tensorflow/compiler/xla/service/cpu/BUILD | 7
-rw-r--r--  tensorflow/compiler/xla/service/cpu/cpu_compiler.cc | 3
-rw-r--r--  tensorflow/compiler/xla/service/cpu/cpu_executable.cc | 20
-rw-r--r--  tensorflow/compiler/xla/service/cpu/cpu_hlo_support_checker.cc | 5
-rw-r--r--  tensorflow/compiler/xla/service/cpu/cpu_runtime_test.cc | 18
-rw-r--r--  tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.cc | 10
-rw-r--r--  tensorflow/compiler/xla/service/cpu/disassembler.cc | 6
-rw-r--r--  tensorflow/compiler/xla/service/cpu/dot_op_emitter.cc | 2
-rw-r--r--  tensorflow/compiler/xla/service/cpu/ir_emitter.cc | 28
-rw-r--r--  tensorflow/compiler/xla/service/cpu/parallel_loop_emitter.cc | 8
-rw-r--r--  tensorflow/compiler/xla/service/cpu/sample_harness.cc | 6
-rw-r--r--  tensorflow/compiler/xla/service/cpu/tests/cpu_intrinsic_test.cc | 4
-rw-r--r--  tensorflow/compiler/xla/service/device_memory_allocator.cc | 9
-rw-r--r--  tensorflow/compiler/xla/service/dfs_hlo_visitor.cc | 4
-rw-r--r--  tensorflow/compiler/xla/service/elemental_ir_emitter.cc | 32
-rw-r--r--  tensorflow/compiler/xla/service/executable.cc | 8
-rw-r--r--  tensorflow/compiler/xla/service/execution_tracker.cc | 4
-rw-r--r--  tensorflow/compiler/xla/service/gather_expander.cc | 2
-rw-r--r--  tensorflow/compiler/xla/service/gpu/BUILD | 5
-rw-r--r--  tensorflow/compiler/xla/service/gpu/buffer_allocations.cc | 4
-rw-r--r--  tensorflow/compiler/xla/service/gpu/buffer_comparator.cc | 2
-rw-r--r--  tensorflow/compiler/xla/service/gpu/conditional_thunk.cc | 2
-rw-r--r--  tensorflow/compiler/xla/service/gpu/convolution_thunk.cc | 1
-rw-r--r--  tensorflow/compiler/xla/service/gpu/cudnn_batchnorm_thunk.cc | 1
-rw-r--r--  tensorflow/compiler/xla/service/gpu/cudnn_convolution_algorithm_picker.cc | 7
-rw-r--r--  tensorflow/compiler/xla/service/gpu/cudnn_convolution_runner.cc | 4
-rw-r--r--  tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.cc | 8
-rw-r--r--  tensorflow/compiler/xla/service/gpu/fft_thunk.cc | 8
-rw-r--r--  tensorflow/compiler/xla/service/gpu/gemm_thunk.cc | 2
-rw-r--r--  tensorflow/compiler/xla/service/gpu/gpu_executable.cc | 9
-rw-r--r--  tensorflow/compiler/xla/service/gpu/gpu_hlo_support_checker.cc | 5
-rw-r--r--  tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.cc | 4
-rw-r--r--  tensorflow/compiler/xla/service/gpu/hlo_schedule_test.cc | 3
-rw-r--r--  tensorflow/compiler/xla/service/gpu/infeed_thunk.cc | 2
-rw-r--r--  tensorflow/compiler/xla/service/gpu/instruction_fusion_test.cc | 2
-rw-r--r--  tensorflow/compiler/xla/service/gpu/ir_emitter.cc | 4
-rw-r--r--  tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc | 3
-rw-r--r--  tensorflow/compiler/xla/service/gpu/kernel_thunk.cc | 4
-rw-r--r--  tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/BUILD | 1
-rw-r--r--  tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/dump_ir_pass.cc | 7
-rw-r--r--  tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/nvptx_backend_lib.cc | 1
-rw-r--r--  tensorflow/compiler/xla/service/gpu/outfeed_thunk.cc | 2
-rw-r--r--  tensorflow/compiler/xla/service/gpu/partition_assignment.cc | 11
-rw-r--r--  tensorflow/compiler/xla/service/gpu/stream_assignment_test.cc | 4
-rw-r--r--  tensorflow/compiler/xla/service/gpu/while_thunk.cc | 2
-rw-r--r--  tensorflow/compiler/xla/service/hlo_computation.cc | 9
-rw-r--r--  tensorflow/compiler/xla/service/hlo_dataflow_analysis.cc | 2
-rw-r--r--  tensorflow/compiler/xla/service/hlo_evaluator.cc | 14
-rw-r--r--  tensorflow/compiler/xla/service/hlo_evaluator.h | 4
-rw-r--r--  tensorflow/compiler/xla/service/hlo_evaluator_typed_visitor.h | 14
-rw-r--r--  tensorflow/compiler/xla/service/hlo_graph_dumper.cc | 152
-rw-r--r--  tensorflow/compiler/xla/service/hlo_instruction.cc | 8
-rw-r--r--  tensorflow/compiler/xla/service/hlo_instruction_test.cc | 2
-rw-r--r--  tensorflow/compiler/xla/service/hlo_lexer.cc | 3
-rw-r--r--  tensorflow/compiler/xla/service/hlo_module_group_metadata.cc | 28
-rw-r--r--  tensorflow/compiler/xla/service/hlo_module_group_util.cc | 2
-rw-r--r--  tensorflow/compiler/xla/service/hlo_opcode.cc | 2
-rw-r--r--  tensorflow/compiler/xla/service/hlo_ordering.cc | 19
-rw-r--r--  tensorflow/compiler/xla/service/hlo_parser.cc | 107
-rw-r--r--  tensorflow/compiler/xla/service/hlo_pass_pipeline.cc | 7
-rw-r--r--  tensorflow/compiler/xla/service/hlo_rematerialization.cc | 13
-rw-r--r--  tensorflow/compiler/xla/service/hlo_scheduling.cc | 1
-rw-r--r--  tensorflow/compiler/xla/service/hlo_verifier.cc | 131
-rw-r--r--  tensorflow/compiler/xla/service/human_readable_profile_builder.cc | 35
-rw-r--r--  tensorflow/compiler/xla/service/interpreter/platform.cc | 6
-rw-r--r--  tensorflow/compiler/xla/service/layout_assignment.cc | 73
-rw-r--r--  tensorflow/compiler/xla/service/llvm_ir/BUILD | 1
-rw-r--r--  tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.cc | 2
-rw-r--r--  tensorflow/compiler/xla/service/llvm_ir/llvm_loop.cc | 1
-rw-r--r--  tensorflow/compiler/xla/service/llvm_ir/loop_emitter.cc | 4
-rw-r--r--  tensorflow/compiler/xla/service/local_service.cc | 14
-rw-r--r--  tensorflow/compiler/xla/service/platform_util.cc | 14
-rw-r--r--  tensorflow/compiler/xla/service/scatter_expander.cc | 2
-rw-r--r--  tensorflow/compiler/xla/service/service.cc | 66
-rw-r--r--  tensorflow/compiler/xla/service/shape_inference.cc | 699
-rw-r--r--  tensorflow/compiler/xla/service/shaped_buffer.cc | 10
-rw-r--r--  tensorflow/compiler/xla/service/source_map_util.cc | 8
-rw-r--r--  tensorflow/compiler/xla/service/source_map_util.h | 34
-rw-r--r--  tensorflow/compiler/xla/service/transfer_manager.cc | 17
-rw-r--r--  tensorflow/compiler/xla/service/tuple_points_to_analysis.cc | 17
90 files changed, 944 insertions(+), 1011 deletions(-)
diff --git a/tensorflow/compiler/xla/service/BUILD b/tensorflow/compiler/xla/service/BUILD
index 47d376c8ac..f164a614f1 100644
--- a/tensorflow/compiler/xla/service/BUILD
+++ b/tensorflow/compiler/xla/service/BUILD
@@ -178,6 +178,7 @@ cc_library(
"//tensorflow/core:lib",
"@com_google_absl//absl/algorithm:container",
"@com_google_absl//absl/strings",
+ "@com_google_absl//absl/strings:str_format",
],
)
@@ -465,6 +466,7 @@ cc_library(
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/strings",
+ "@com_google_absl//absl/strings:str_format",
],
)
@@ -637,6 +639,7 @@ cc_library(
"//tensorflow/core:stream_executor_no_cuda",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/strings",
+ "@com_google_absl//absl/strings:str_format",
],
alwayslink = 1,
)
@@ -671,6 +674,7 @@ cc_library(
"//tensorflow/core:stream_executor_no_cuda",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/strings",
+ "@com_google_absl//absl/strings:str_format",
],
)
@@ -746,6 +750,7 @@ cc_library(
"//tensorflow/core:stream_executor_no_cuda",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/strings",
+ "@com_google_absl//absl/strings:str_format",
],
)
@@ -795,6 +800,7 @@ cc_library(
"//tensorflow/core:stream_executor_no_cuda",
"//tensorflow/stream_executor",
"@com_google_absl//absl/memory",
+ "@com_google_absl//absl/strings:str_format",
],
)
@@ -946,6 +952,7 @@ cc_library(
"//tensorflow/compiler/xla:util",
"//tensorflow/core:lib",
"@com_google_absl//absl/strings",
+ "@com_google_absl//absl/strings:str_format",
],
)
@@ -992,6 +999,7 @@ cc_library(
"//tensorflow/core:lib_internal",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/strings",
+ "@com_google_absl//absl/strings:str_format",
],
)
@@ -1040,6 +1048,7 @@ cc_library(
"//tensorflow/compiler/xla:util",
"//tensorflow/core:lib",
"@com_google_absl//absl/strings",
+ "@com_google_absl//absl/strings:str_format",
],
)
@@ -1746,6 +1755,7 @@ cc_library(
"//tensorflow/compiler/xla:util",
"//tensorflow/core:lib",
"@com_google_absl//absl/strings",
+ "@com_google_absl//absl/strings:str_format",
],
)
@@ -2135,6 +2145,7 @@ cc_library(
"@com_google_absl//absl/container:inlined_vector",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/strings",
+ "@com_google_absl//absl/strings:str_format",
],
)
@@ -2187,6 +2198,7 @@ cc_library(
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/strings",
+ "@com_google_absl//absl/strings:str_format",
],
)
@@ -2325,6 +2337,7 @@ cc_library(
"//tensorflow/core:lib_internal",
"@com_google_absl//absl/container:inlined_vector",
"@com_google_absl//absl/strings",
+ "@com_google_absl//absl/strings:str_format",
],
)
@@ -2448,6 +2461,7 @@ cc_library(
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/strings",
+ "@com_google_absl//absl/strings:str_format",
],
)
@@ -2803,6 +2817,7 @@ cc_library(
"//tensorflow/core:lib_internal",
"//tensorflow/core:regexp_internal",
"@com_google_absl//absl/strings",
+ "@com_google_absl//absl/strings:str_format",
"@com_google_absl//absl/types:optional",
],
alwayslink = 1,
@@ -3143,13 +3158,13 @@ cc_library(
cc_library(
name = "source_map_util",
- srcs = ["source_map_util.cc"],
+ srcs = [],
hdrs = ["source_map_util.h"],
deps = [
":executable",
"//tensorflow/compiler/xla:status",
- "//tensorflow/compiler/xla:util",
"//tensorflow/core:lib",
+ "@com_google_absl//absl/strings:str_format",
],
)
@@ -3199,11 +3214,11 @@ cc_library(
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:util",
"//tensorflow/compiler/xla:xla_data_proto",
- "//tensorflow/core:lib",
"//tensorflow/core:lib_internal",
"@com_google_absl//absl/algorithm:container",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/strings",
+ "@com_google_absl//absl/strings:str_format",
],
)
diff --git a/tensorflow/compiler/xla/service/allocation_tracker.cc b/tensorflow/compiler/xla/service/allocation_tracker.cc
index 5115a14df0..1ed6142dce 100644
--- a/tensorflow/compiler/xla/service/allocation_tracker.cc
+++ b/tensorflow/compiler/xla/service/allocation_tracker.cc
@@ -69,8 +69,7 @@ StatusOr<GlobalDataHandle> AllocationTracker::RegisterInternal(
return InvalidArgument(
"AllocationTracker for platform %s cannot register buffer from "
"platform %s",
- backend_->platform()->Name().c_str(),
- shaped_buffer.platform()->Name().c_str());
+ backend_->platform()->Name(), shaped_buffer.platform()->Name());
}
}
@@ -125,7 +124,7 @@ Status AllocationTracker::Unregister(const GlobalDataHandle& data) {
// "handle does not exist".
auto it = handle_to_shaped_buffers_.find(data.handle());
if (it == handle_to_shaped_buffers_.end()) {
- return NotFound("no allocation record for global data handle: %lld",
+ return NotFound("no allocation record for global data handle: %d",
data.handle());
}
for (auto& shaped_buffer : it->second) {
@@ -144,7 +143,7 @@ StatusOr<std::vector<GlobalDataHandle>> AllocationTracker::DeconstructTuple(
// the same for all buffers across replicas.
const ShapedBuffer* shaped_buffer = replicated_buffers[0];
if (!ShapeUtil::IsTuple(shaped_buffer->on_host_shape())) {
- return InvalidArgument("global data handle %lld is not a tuple",
+ return InvalidArgument("global data handle %d is not a tuple",
data.handle());
}
// If the on-host representation is a tuple, then the on-device one should be
@@ -201,14 +200,14 @@ StatusOr<std::vector<const ShapedBuffer*>> AllocationTracker::ResolveInternal(
VLOG(2) << "resolve:" << data.handle();
auto it = handle_to_shaped_buffers_.find(data.handle());
if (it == handle_to_shaped_buffers_.end()) {
- return NotFound("no allocation record for global data handle: %lld",
+ return NotFound("no allocation record for global data handle: %d",
data.handle());
}
std::vector<const ShapedBuffer*> replicated_buffers;
for (const auto& shaped_buffer : it->second) {
if (shaped_buffer == nullptr) {
- return InvalidArgument(
- "global data handle %lld was previously deallocated", data.handle());
+ return InvalidArgument("global data handle %d was previously deallocated",
+ data.handle());
}
replicated_buffers.push_back(shaped_buffer.get());
}
diff --git a/tensorflow/compiler/xla/service/backend.cc b/tensorflow/compiler/xla/service/backend.cc
index 841d0fa85b..a6889cb171 100644
--- a/tensorflow/compiler/xla/service/backend.cc
+++ b/tensorflow/compiler/xla/service/backend.cc
@@ -177,7 +177,7 @@ StatusOr<se::StreamExecutor*> Backend::stream_executor(
}
}
return InvalidArgument("device %s not supported by XLA service",
- device_name(device_ordinal).c_str());
+ device_name(device_ordinal));
}
StatusOr<bool> Backend::devices_equivalent(int device_ordinal_a,
diff --git a/tensorflow/compiler/xla/service/buffer_assignment.cc b/tensorflow/compiler/xla/service/buffer_assignment.cc
index c8c36ae60e..b11f15ec7b 100644
--- a/tensorflow/compiler/xla/service/buffer_assignment.cc
+++ b/tensorflow/compiler/xla/service/buffer_assignment.cc
@@ -24,6 +24,7 @@ limitations under the License.
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
+#include "absl/strings/str_format.h"
#include "tensorflow/compiler/xla/map_util.h"
#include "tensorflow/compiler/xla/service/buffer_value_containers.h"
#include "tensorflow/compiler/xla/service/heap_simulator.h"
@@ -37,17 +38,15 @@ limitations under the License.
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/numbers.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
namespace xla {
namespace {
using absl::StrAppend;
+using absl::StrAppendFormat;
using ::tensorflow::gtl::FlatMap;
using ::tensorflow::gtl::FlatSet;
-using ::tensorflow::strings::Appendf;
using ::tensorflow::strings::HumanReadableNumBytes;
-using ::tensorflow::strings::Printf;
template <typename T>
string ColocatedBufferSetsToString(const T& container, const char* title) {
@@ -105,7 +104,7 @@ Status GatherComputationsByAllocationType(
return InvalidArgument(
"computation %s has conflicting allocation requirements (global "
"and thread-local)",
- computation->name().c_str());
+ computation->name());
}
if (is_thread_local) {
@@ -128,7 +127,7 @@ Status GatherComputationsByAllocationType(
return InvalidArgument(
"computation %s cannot contain call/while op because it "
"requires thread-local buffer allocations",
- computation->name().c_str());
+ computation->name());
}
worklist.push_back(std::make_pair(subcomputation,
false)); // Not thread local.
@@ -145,9 +144,8 @@ Status GatherComputationsByAllocationType(
true)); // Thread local.
break;
default:
- return InternalError(
- "Unexpected calling opcode: %s",
- HloOpcodeString(instruction->opcode()).c_str());
+ return InternalError("Unexpected calling opcode: %s",
+ HloOpcodeString(instruction->opcode()));
}
}
}
@@ -296,7 +294,7 @@ BufferAllocationProto BufferAllocation::ToProto() const {
string BufferAllocation::ToString() const {
string output;
- Appendf(&output, "allocation %lld: %p, size %lld", index_, this, size());
+ StrAppendFormat(&output, "allocation %d: %p, size %d", index_, this, size());
if (color().value() != 0) {
StrAppend(&output, ", color ", color().value());
}
@@ -328,11 +326,10 @@ string BufferAllocation::ToString() const {
});
for (const LogicalBuffer* buffer : sorted_buffers) {
const OffsetSize& offset_size = FindOrDie(assigned_buffers_, buffer);
- StrAppend(&output,
- tensorflow::strings::Printf(
- " %s [%lld,%lld]: %s\n", buffer->ToString().c_str(),
- offset_size.offset, offset_size.size,
- ShapeUtil::HumanStringWithLayout(buffer->shape()).c_str()));
+ StrAppend(&output, absl::StrFormat(
+ " %s [%d,%d]: %s\n", buffer->ToString(),
+ offset_size.offset, offset_size.size,
+ ShapeUtil::HumanStringWithLayout(buffer->shape())));
}
return output;
}
@@ -425,7 +422,7 @@ StatusOr<BufferAllocation::Slice> BufferAssignment::GetUniqueSlice(
return FailedPrecondition(
"BufferAllocation::Slice for instruction %s at index %s cannot "
"be determined at compile-time.",
- instruction->name().c_str(), index.ToString().c_str());
+ instruction->name(), index.ToString());
}
} else {
VLOG(3) << "No allocation";
@@ -434,7 +431,7 @@ StatusOr<BufferAllocation::Slice> BufferAssignment::GetUniqueSlice(
if (result.allocation() == nullptr) {
return FailedPrecondition(
"BufferAllocation::Slice not assigned for instruction %s at index %s",
- instruction->name().c_str(), index.ToString().c_str());
+ instruction->name(), index.ToString());
}
return result;
}
@@ -646,30 +643,29 @@ Status BufferAssignment::ComputeSummaryStats() {
string BufferAssignment::Stats::ToString() const {
string s;
- Appendf(&s, "BufferAssignment stats:\n");
- Appendf(&s, " parameter allocation: %10s\n",
- HumanReadableNumBytes(parameter_allocation_bytes).c_str());
- Appendf(&s, " constant allocation: %10s\n",
- HumanReadableNumBytes(constant_allocation_bytes).c_str());
- Appendf(&s, " maybe_live_out allocation: %10s\n",
- HumanReadableNumBytes(maybe_live_out_allocation_bytes).c_str());
- Appendf(&s, " preallocated temp allocation: %10s\n",
- HumanReadableNumBytes(preallocated_temp_allocation_bytes).c_str());
+ StrAppendFormat(&s, "BufferAssignment stats:\n");
+ StrAppendFormat(&s, " parameter allocation: %10s\n",
+ HumanReadableNumBytes(parameter_allocation_bytes));
+ StrAppendFormat(&s, " constant allocation: %10s\n",
+ HumanReadableNumBytes(constant_allocation_bytes));
+ StrAppendFormat(&s, " maybe_live_out allocation: %10s\n",
+ HumanReadableNumBytes(maybe_live_out_allocation_bytes));
+ StrAppendFormat(&s, " preallocated temp allocation: %10s\n",
+ HumanReadableNumBytes(preallocated_temp_allocation_bytes));
if (preallocated_temp_fragmentation_bytes >= 0) {
const double percent = 100. * preallocated_temp_fragmentation_bytes /
preallocated_temp_allocation_bytes;
- Appendf(
+ StrAppendFormat(
&s, " preallocated temp fragmentation: %10s (%.2f%%)\n",
- HumanReadableNumBytes(preallocated_temp_fragmentation_bytes).c_str(),
- percent);
+ HumanReadableNumBytes(preallocated_temp_fragmentation_bytes), percent);
}
- Appendf(&s, " total allocation: %10s\n",
- HumanReadableNumBytes(total_allocation_bytes).c_str());
+ StrAppendFormat(&s, " total allocation: %10s\n",
+ HumanReadableNumBytes(total_allocation_bytes));
if (total_fragmentation_bytes >= 0) {
const double percent =
100. * total_fragmentation_bytes / total_allocation_bytes;
- Appendf(&s, " total fragmentation: %10s (%.2f%%)\n",
- HumanReadableNumBytes(total_fragmentation_bytes).c_str(), percent);
+ StrAppendFormat(&s, " total fragmentation: %10s (%.2f%%)\n",
+ HumanReadableNumBytes(total_fragmentation_bytes), percent);
}
return s;
}
diff --git a/tensorflow/compiler/xla/service/buffer_liveness.cc b/tensorflow/compiler/xla/service/buffer_liveness.cc
index 8d0ac3b84a..9b2783a214 100644
--- a/tensorflow/compiler/xla/service/buffer_liveness.cc
+++ b/tensorflow/compiler/xla/service/buffer_liveness.cc
@@ -20,6 +20,7 @@ limitations under the License.
#include <utility>
#include <vector>
+#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/logical_buffer.h"
@@ -29,7 +30,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/core/errors.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"
namespace xla {
@@ -75,19 +75,17 @@ Status BufferLiveness::Analyze() {
string BufferLiveness::ToString() const {
std::vector<string> pieces;
- pieces.push_back(tensorflow::strings::Printf("BufferLiveness(module=%s):",
- module_->name().c_str()));
+ pieces.push_back(
+ absl::StrFormat("BufferLiveness(module=%s):", module_->name()));
pieces.push_back("HloOrdering:");
pieces.push_back(hlo_ordering_->ToString());
- pieces.push_back(tensorflow::strings::Printf("Aliased buffers:"));
+ pieces.push_back("Aliased buffers:");
for (const LogicalBuffer* buffer : aliased_buffers_) {
- pieces.push_back(
- tensorflow::strings::Printf(" %s", buffer->ToString().c_str()));
+ pieces.push_back(absl::StrFormat(" %s", buffer->ToString()));
}
- pieces.push_back(tensorflow::strings::Printf("Live out buffers:"));
+ pieces.push_back("Live out buffers:");
for (const LogicalBuffer* buffer : maybe_live_out_buffers_) {
- pieces.push_back(
- tensorflow::strings::Printf(" %s", buffer->ToString().c_str()));
+ pieces.push_back(absl::StrFormat(" %s", buffer->ToString()));
}
return absl::StrJoin(pieces, "\n");
}
diff --git a/tensorflow/compiler/xla/service/call_graph.cc b/tensorflow/compiler/xla/service/call_graph.cc
index 37523a73ff..23b2a32709 100644
--- a/tensorflow/compiler/xla/service/call_graph.cc
+++ b/tensorflow/compiler/xla/service/call_graph.cc
@@ -19,19 +19,19 @@ limitations under the License.
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
+#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "tensorflow/compiler/xla/map_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/types.h"
namespace xla {
+using absl::StrAppendFormat;
using absl::StrCat;
-using ::tensorflow::strings::Appendf;
string CallContextToString(CallContext context) {
switch (context) {
@@ -356,20 +356,20 @@ CallGraph::NearestAncestorsInSameComputation(HloInstruction* a,
string CallGraph::ToString() const {
string out;
- Appendf(&out, "Call graph for module %s:\n", module_->name().c_str());
+ StrAppendFormat(&out, "Call graph for module %s:\n", module_->name());
for (const CallGraphNode& node : nodes()) {
- Appendf(&out, "Computation %s:\n", node.computation()->name().c_str());
- Appendf(&out, " calls:\n");
+ StrAppendFormat(&out, "Computation %s:\n", node.computation()->name());
+ StrAppendFormat(&out, " calls:\n");
for (const HloComputation* callee : node.callees()) {
- Appendf(&out, " %s\n", callee->name().c_str());
+ StrAppendFormat(&out, " %s\n", callee->name());
}
- Appendf(&out, " called by:\n");
+ StrAppendFormat(&out, " called by:\n");
for (const HloComputation* caller : node.callers()) {
- Appendf(&out, " %s\n", caller->name().c_str());
+ StrAppendFormat(&out, " %s\n", caller->name());
}
- Appendf(&out, " callsites:\n");
+ StrAppendFormat(&out, " callsites:\n");
for (const CallSite& callsite : node.callsites()) {
- Appendf(&out, " %s\n", callsite.ToString().c_str());
+ StrAppendFormat(&out, " %s\n", callsite.ToString());
}
}
return out;
diff --git a/tensorflow/compiler/xla/service/call_inliner.cc b/tensorflow/compiler/xla/service/call_inliner.cc
index 256d05a73e..1d42140444 100644
--- a/tensorflow/compiler/xla/service/call_inliner.cc
+++ b/tensorflow/compiler/xla/service/call_inliner.cc
@@ -96,7 +96,7 @@ class SubcomputationInsertionVisitor : public DfsHloVisitorWithDefault {
if (it == subcomputation_hlo_to_new_hlo_.end()) {
return NotFound(
"Could not find mapping from subcomputation HLO %s to a cloned HLO.",
- subcomputation_hlo->ToString().c_str());
+ subcomputation_hlo->ToString());
}
return it->second;
}
diff --git a/tensorflow/compiler/xla/service/channel_tracker.cc b/tensorflow/compiler/xla/service/channel_tracker.cc
index 601a3e9a01..3c2d1ae6d8 100644
--- a/tensorflow/compiler/xla/service/channel_tracker.cc
+++ b/tensorflow/compiler/xla/service/channel_tracker.cc
@@ -73,20 +73,20 @@ ChannelHandle ChannelTracker::AllocateHandle(ChannelHandle::ChannelType type) {
Status ChannelTracker::RegisterSendInternal(const ChannelHandle& handle) {
if (opaque_to_channel_.count(handle.handle()) == 0) {
- return NotFound("channel handle not found: %lld", handle.handle());
+ return NotFound("channel handle not found: %d", handle.handle());
}
Channel& channel = opaque_to_channel_[handle.handle()];
if (channel.type == ChannelHandle::HOST_TO_DEVICE) {
return FailedPrecondition(
"host-to-device channels cannot be used with a Send operation; "
- "channel handle: %lld",
+ "channel handle: %d",
handle.handle());
}
if (channel.has_sender) {
return FailedPrecondition(
"when registering send, passed a channel handle that is already used "
- "by a sender: %lld",
+ "by a sender: %d",
handle.handle());
}
channel.has_sender = true;
@@ -95,13 +95,13 @@ Status ChannelTracker::RegisterSendInternal(const ChannelHandle& handle) {
Status ChannelTracker::RegisterRecvInternal(const ChannelHandle& handle) {
if (opaque_to_channel_.count(handle.handle()) == 0) {
- return NotFound("channel handle not found: %lld", handle.handle());
+ return NotFound("channel handle not found: %d", handle.handle());
}
Channel& channel = opaque_to_channel_[handle.handle()];
if (channel.type == ChannelHandle::DEVICE_TO_HOST) {
return FailedPrecondition(
"device-to-host channels cannot be used with a Recv operation; "
- "channel handle: %lld",
+ "channel handle: %d",
handle.handle());
}
@@ -109,7 +109,7 @@ Status ChannelTracker::RegisterRecvInternal(const ChannelHandle& handle) {
if (channel.receiver_count >= 1) {
return FailedPrecondition(
"when registering recv, passed a channel handle that is already used "
- "by a receiver: %lld",
+ "by a receiver: %d",
handle.handle());
}
channel.receiver_count += 1;
diff --git a/tensorflow/compiler/xla/service/compiler.cc b/tensorflow/compiler/xla/service/compiler.cc
index 6b3b9820f0..687ecafe0c 100644
--- a/tensorflow/compiler/xla/service/compiler.cc
+++ b/tensorflow/compiler/xla/service/compiler.cc
@@ -101,7 +101,7 @@ Compiler::GetPlatformCompilers() {
return NotFound(
"could not find registered compiler for platform %s -- check "
"target linkage",
- platform->Name().c_str());
+ platform->Name());
}
// And then we invoke the factory, placing the result into the mapping.
diff --git a/tensorflow/compiler/xla/service/computation_placer.cc b/tensorflow/compiler/xla/service/computation_placer.cc
index 61b1dba6c9..2210a8578a 100644
--- a/tensorflow/compiler/xla/service/computation_placer.cc
+++ b/tensorflow/compiler/xla/service/computation_placer.cc
@@ -132,7 +132,7 @@ StatusOr<DeviceAssignment> ComputationPlacer::AssignDevices(
return NotFound(
"could not find registered computation placer for platform %s -- check "
"target linkage",
- platform->Name().c_str());
+ platform->Name());
}
if (it->second.placer == nullptr) {
diff --git a/tensorflow/compiler/xla/service/cpu/BUILD b/tensorflow/compiler/xla/service/cpu/BUILD
index e01fecffd0..f0adfc5d45 100644
--- a/tensorflow/compiler/xla/service/cpu/BUILD
+++ b/tensorflow/compiler/xla/service/cpu/BUILD
@@ -235,6 +235,7 @@ cc_library(
"//tensorflow/core:lib",
"//tensorflow/core:stream_executor_no_cuda",
"@com_google_absl//absl/strings",
+ "@com_google_absl//absl/strings:str_format",
"@llvm//:orc_jit",
],
)
@@ -283,6 +284,7 @@ cc_library(
"//tensorflow/compiler/xla/service/llvm_ir:tuple_ops",
"//tensorflow/core:lib",
"@com_google_absl//absl/strings",
+ "@com_google_absl//absl/strings:str_format",
"@llvm//:code_gen",
"@llvm//:core",
"@llvm//:support",
@@ -338,12 +340,12 @@ cc_library(
hdrs = ["parallel_loop_emitter.h"],
deps = [
":ir_emission_utils",
- "//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/compiler/xla/service/llvm_ir:ir_array",
"//tensorflow/compiler/xla/service/llvm_ir:llvm_loop",
"//tensorflow/compiler/xla/service/llvm_ir:llvm_util",
"//tensorflow/compiler/xla/service/llvm_ir:loop_emitter",
"//tensorflow/core:lib",
+ "@com_google_absl//absl/strings:str_format",
"@llvm//:core",
],
)
@@ -391,6 +393,7 @@ tf_cc_binary(
"//tensorflow/compiler/xla/client:xla_builder",
"//tensorflow/compiler/xla/client:xla_computation",
"//tensorflow/core:lib",
+ "@com_google_absl//absl/strings:str_format",
],
)
@@ -404,6 +407,7 @@ cc_library(
"//tensorflow/compiler/xla:types",
"//tensorflow/compiler/xla:util",
"//tensorflow/core:lib",
+ "@com_google_absl//absl/strings:str_format",
"@llvm//:mc",
"@llvm//:mc_disassembler",
"@llvm//:object",
@@ -645,6 +649,7 @@ tf_cc_test(
"//tensorflow/core:test",
"//third_party/eigen3",
"@com_google_absl//absl/memory",
+ "@com_google_absl//absl/strings:str_format",
],
)
diff --git a/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc b/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc
index 279aa42fe2..6420180b13 100644
--- a/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc
+++ b/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc
@@ -705,8 +705,7 @@ CpuCompiler::CompileAheadOfTime(std::vector<std::unique_ptr<HloModule>> modules,
const llvm::Target* target =
llvm::TargetRegistry::lookupTarget(triple.getTriple(), error);
if (target == nullptr) {
- return InternalError("TargetRegistry::lookupTarget failed: %s",
- error.c_str());
+ return InternalError("TargetRegistry::lookupTarget failed: %s", error);
}
llvm::Reloc::Model reloc_model = llvm::Reloc::Static;
diff --git a/tensorflow/compiler/xla/service/cpu/cpu_executable.cc b/tensorflow/compiler/xla/service/cpu/cpu_executable.cc
index fbcbbbd200..08773693fb 100644
--- a/tensorflow/compiler/xla/service/cpu/cpu_executable.cc
+++ b/tensorflow/compiler/xla/service/cpu/cpu_executable.cc
@@ -23,6 +23,7 @@ limitations under the License.
#include <vector>
#include "absl/strings/str_cat.h"
+#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
#include "tensorflow/compiler/xla/service/buffer_assignment.h"
@@ -37,7 +38,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mem.h"
@@ -171,20 +171,18 @@ Status CpuExecutable::ExecuteComputeFunction(
void* result_buffer = buffer_pointers[result_slice.index()];
if (VLOG_IS_ON(3)) {
VLOG(3) << "Executing compute function:";
- VLOG(3) << tensorflow::strings::Printf(
- " func(void* result, void* params[null], void* temps[%zu], "
- "uint64 profile_counters[%zu])",
+ VLOG(3) << absl::StrFormat(
+ " func(void* result, void* params[null], void* temps[%u], "
+ "uint64 profile_counters[%u])",
buffer_pointers.size(), profile_counters_size);
- VLOG(3) << tensorflow::strings::Printf(" result = %p", result_buffer);
+ VLOG(3) << absl::StrFormat(" result = %p", result_buffer);
auto ptr_printer = [](string* out, const void* p) {
- absl::StrAppend(out, tensorflow::strings::Printf("%p", p));
+ absl::StrAppend(out, absl::StrFormat("%p", p));
};
VLOG(3) << " params = nullptr";
- VLOG(3) << tensorflow::strings::Printf(
- " temps = [%s]",
- absl::StrJoin(buffer_pointers, ", ", ptr_printer).c_str());
- VLOG(3) << tensorflow::strings::Printf(" profile_counters = %p",
- profile_counters);
+ VLOG(3) << absl::StrFormat(
+ " temps = [%s]", absl::StrJoin(buffer_pointers, ", ", ptr_printer));
+ VLOG(3) << absl::StrFormat(" profile_counters = %p", profile_counters);
}
compute_function_(result_buffer, run_options, nullptr, buffer_pointers.data(),
diff --git a/tensorflow/compiler/xla/service/cpu/cpu_hlo_support_checker.cc b/tensorflow/compiler/xla/service/cpu/cpu_hlo_support_checker.cc
index 7bd4741a04..7fbe0fa157 100644
--- a/tensorflow/compiler/xla/service/cpu/cpu_hlo_support_checker.cc
+++ b/tensorflow/compiler/xla/service/cpu/cpu_hlo_support_checker.cc
@@ -34,9 +34,8 @@ StatusOr<bool> CpuHloSupportChecker::Run(HloModule* module) {
return xla::Unimplemented(
"CPU backend does not support HLO instruction %s with shape "
"containing a sparse layout: %s",
- instruction->ToString().c_str(),
- ShapeUtil::HumanStringWithLayout(instruction->shape())
- .c_str());
+ instruction->ToString(),
+ ShapeUtil::HumanStringWithLayout(instruction->shape()));
}
return Status::OK();
}));
diff --git a/tensorflow/compiler/xla/service/cpu/cpu_runtime_test.cc b/tensorflow/compiler/xla/service/cpu/cpu_runtime_test.cc
index bc4cfc0999..1ae3aa5711 100644
--- a/tensorflow/compiler/xla/service/cpu/cpu_runtime_test.cc
+++ b/tensorflow/compiler/xla/service/cpu/cpu_runtime_test.cc
@@ -20,6 +20,7 @@ limitations under the License.
#include <tuple>
#include "absl/memory/memory.h"
+#include "absl/strings/str_format.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/compiler/xla/array2d.h"
#include "tensorflow/compiler/xla/client/local_client.h"
@@ -28,7 +29,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/cpu/runtime_single_threaded_matmul.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/core/common_runtime/eigen_thread_pool.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
@@ -142,10 +142,10 @@ class EigenMatMulTest : public CpuRuntimeTest,
bool transpose_rhs = std::get<2>(info.param);
bool single_threaded = std::get<3>(info.param);
- return tensorflow::strings::Printf(
- "EigenMatMul_%lld_%lld_%lld_%s%s%s_threaded", shape.m, shape.k, shape.n,
- transpose_lhs ? "Tlhs_" : "", transpose_rhs ? "Trhs_" : "",
- single_threaded ? "single" : "multi");
+ return absl::StrFormat("EigenMatMul_%d_%d_%d_%s%s%s_threaded", shape.m,
+ shape.k, shape.n, transpose_lhs ? "Tlhs_" : "",
+ transpose_rhs ? "Trhs_" : "",
+ single_threaded ? "single" : "multi");
}
};
@@ -178,10 +178,10 @@ class MKLMatMulTest : public CpuRuntimeTest,
bool transpose_rhs = std::get<2>(info.param);
bool single_threaded = std::get<3>(info.param);
- return tensorflow::strings::Printf(
- "MKLMatMul_%lld_%lld_%lld_%s%s%s_threaded", shape.m, shape.k, shape.n,
- transpose_lhs ? "Tlhs_" : "", transpose_rhs ? "Trhs_" : "",
- single_threaded ? "single" : "multi");
+ return absl::StrFormat("MKLMatMul_%d_%d_%d_%s%s%s_threaded", shape.m,
+ shape.k, shape.n, transpose_lhs ? "Tlhs_" : "",
+ transpose_rhs ? "Trhs_" : "",
+ single_threaded ? "single" : "multi");
}
};
diff --git a/tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.cc b/tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.cc
index b07cd675ff..0df2abf001 100644
--- a/tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.cc
+++ b/tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.cc
@@ -104,7 +104,7 @@ Status CpuTransferManager::TransferLiteralToInfeed(
if (ShapeUtil::IsNestedTuple(shape)) {
return Unimplemented(
"Infeed with a nested tuple shape is not supported: %s",
- ShapeUtil::HumanString(literal.shape()).c_str());
+ ShapeUtil::HumanString(literal.shape()));
}
// For a tuple, we transfer each of its elements to the device and
@@ -152,11 +152,11 @@ CpuTransferManager::TransferBufferToInfeedInternal(se::StreamExecutor* executor,
int64 size,
const void* source) {
if (size > std::numeric_limits<int32>::max()) {
- return InvalidArgument("Infeed shape is too large: needs %lld bytes", size);
+ return InvalidArgument("Infeed shape is too large: needs %d bytes", size);
}
if (size <= 0) {
- return InvalidArgument("Infeed shape must have positive size; got %lld",
+ return InvalidArgument("Infeed shape must have positive size; got %d",
size);
}
@@ -244,12 +244,12 @@ StatusOr<Shape> CpuTransferManager::TransferBuffersFromOutfeedInternal(
for (auto b : buffer_data) {
int64 size = b.second;
if (size > std::numeric_limits<int32>::max()) {
- return InvalidArgument("Outfeed shape is too large: needs %lld bytes",
+ return InvalidArgument("Outfeed shape is too large: needs %d bytes",
size);
}
if (size <= 0) {
- return InvalidArgument("Outfeed shape must have positive size; got %lld",
+ return InvalidArgument("Outfeed shape must have positive size; got %d",
size);
}
diff --git a/tensorflow/compiler/xla/service/cpu/disassembler.cc b/tensorflow/compiler/xla/service/cpu/disassembler.cc
index e4c674e227..3ae64142cd 100644
--- a/tensorflow/compiler/xla/service/cpu/disassembler.cc
+++ b/tensorflow/compiler/xla/service/cpu/disassembler.cc
@@ -21,13 +21,13 @@ limitations under the License.
#include <type_traits>
#include <vector>
+#include "absl/strings/str_format.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/util.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
@@ -151,7 +151,7 @@ StatusOr<DisassemblerResult> Disassembler::DisassembleObjectFile(
size = 1;
}
- ostream << tensorflow::strings::Printf("0x%08lx", index) << " ";
+ ostream << absl::StrFormat("0x%08lx", index) << " ";
if (decode_status == llvm::MCDisassembler::Success) {
// For branches, try to determine the actual address and emit it as an
@@ -163,7 +163,7 @@ StatusOr<DisassemblerResult> Disassembler::DisassembleObjectFile(
uint64_t target;
if (inst_analysis_->evaluateBranch(
instruction, section_address + index, size, target)) {
- annotation = tensorflow::strings::Printf("[0x%08lx]", target);
+ annotation = absl::StrFormat("[0x%08lx]", target);
}
}
inst_printer_->printInst(&instruction, ostream, annotation.c_str(),
diff --git a/tensorflow/compiler/xla/service/cpu/dot_op_emitter.cc b/tensorflow/compiler/xla/service/cpu/dot_op_emitter.cc
index 4af16f4fa0..dd060f54a2 100644
--- a/tensorflow/compiler/xla/service/cpu/dot_op_emitter.cc
+++ b/tensorflow/compiler/xla/service/cpu/dot_op_emitter.cc
@@ -1467,7 +1467,7 @@ Status DotOpEmitter::EmitCallToRuntime() {
break;
default:
return Unimplemented("Invalid type %s for dot operation",
- PrimitiveType_Name(type).c_str());
+ PrimitiveType_Name(type));
}
llvm::Type* float_ptr_type = float_type->getPointerTo();
diff --git a/tensorflow/compiler/xla/service/cpu/ir_emitter.cc b/tensorflow/compiler/xla/service/cpu/ir_emitter.cc
index 417a1dba1f..321c2e9896 100644
--- a/tensorflow/compiler/xla/service/cpu/ir_emitter.cc
+++ b/tensorflow/compiler/xla/service/cpu/ir_emitter.cc
@@ -28,6 +28,7 @@ limitations under the License.
#include "tensorflow/core/platform/logging.h"
// IWYU pragma: no_include "llvm/IR/Intrinsics.gen.inc"
#include "absl/strings/str_cat.h"
+#include "absl/strings/str_format.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
@@ -68,7 +69,6 @@ limitations under the License.
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/lib/gtl/flatmap.h"
#include "tensorflow/core/lib/gtl/flatset.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
namespace xla {
@@ -230,9 +230,8 @@ Status IrEmitter::HandleCopy(HloInstruction* copy) {
// Use the elemental emitter for array shapes.
return DefaultAction(copy);
}
- return Unimplemented(
- "unsupported operand type %s for copy instruction",
- PrimitiveType_Name(copy->shape().element_type()).c_str());
+ return Unimplemented("unsupported operand type %s for copy instruction",
+ PrimitiveType_Name(copy->shape().element_type()));
}
// Calculate the alignment of a buffer allocated for a given primitive type.
@@ -389,7 +388,7 @@ Status IrEmitter::EmitXfeedTransfer(XfeedKind kind, const Shape& shape,
int64 length = ByteSizeOf(shape);
if (length <= 0 || length > std::numeric_limits<int32>::max()) {
return InvalidArgument(
- "xfeed (infeed or outfeed) buffer length %lld is outside the valid "
+ "xfeed (infeed or outfeed) buffer length %d is outside the valid "
"size range",
length);
}
@@ -1620,9 +1619,8 @@ StatusOr<bool> IrEmitter::EmitVectorizedReduce(
int64 dimension = LayoutUtil::Minor(reduce->shape().layout(), i);
int64 start_index = 0;
int64 end_index = reduce->shape().dimensions(dimension);
- std::unique_ptr<llvm_ir::ForLoop> loop =
- loop_nest.AddLoop(start_index, end_index,
- tensorflow::strings::Printf("dim.%lld", dimension));
+ std::unique_ptr<llvm_ir::ForLoop> loop = loop_nest.AddLoop(
+ start_index, end_index, absl::StrFormat("dim.%d", dimension));
array_index[dimension] = loop->GetIndVarValue();
}
@@ -1641,9 +1639,9 @@ StatusOr<bool> IrEmitter::EmitVectorizedReduce(
int64 start_index = 0;
int64 end_index = (innermost_dimension_size / vectorization_factor) *
vectorization_factor;
- std::unique_ptr<llvm_ir::ForLoop> loop = loop_nest.AddLoop(
- start_index, end_index, vectorization_factor,
- tensorflow::strings::Printf("dim.%lld", innermost_dimension));
+ std::unique_ptr<llvm_ir::ForLoop> loop =
+ loop_nest.AddLoop(start_index, end_index, vectorization_factor,
+ absl::StrFormat("dim.%d", innermost_dimension));
array_index[innermost_dimension] = loop->GetIndVarValue();
SetToFirstInsertPoint(loop->GetBodyBasicBlock(), &b_);
@@ -2170,8 +2168,8 @@ Status IrEmitter::HandleWhile(HloInstruction* xla_while) {
return InternalError(
"instruction %s %s does not share slice with "
"instruction %s %s",
- a->ToString().c_str(), slice_a.ToString().c_str(),
- b->ToString().c_str(), slice_b.ToString().c_str());
+ a->ToString(), slice_a.ToString(), b->ToString(),
+ slice_b.ToString());
}
return Status::OK();
};
@@ -2826,8 +2824,8 @@ Status IrEmitter::ElementTypesSameAndSupported(
if (std::find(supported_types.begin(), supported_types.end(),
primitive_type) == supported_types.end()) {
return Unimplemented("unsupported operand type %s in op %s",
- PrimitiveType_Name(primitive_type).c_str(),
- HloOpcodeString(instruction.opcode()).c_str());
+ PrimitiveType_Name(primitive_type),
+ HloOpcodeString(instruction.opcode()));
}
return Status::OK();
}
diff --git a/tensorflow/compiler/xla/service/cpu/parallel_loop_emitter.cc b/tensorflow/compiler/xla/service/cpu/parallel_loop_emitter.cc
index aedb069dce..f8441c3e34 100644
--- a/tensorflow/compiler/xla/service/cpu/parallel_loop_emitter.cc
+++ b/tensorflow/compiler/xla/service/cpu/parallel_loop_emitter.cc
@@ -15,9 +15,9 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/cpu/parallel_loop_emitter.h"
+#include "absl/strings/str_format.h"
#include "tensorflow/compiler/xla/service/llvm_ir/llvm_loop.h"
#include "tensorflow/compiler/xla/service/llvm_ir/llvm_util.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
namespace xla {
namespace cpu {
@@ -52,15 +52,15 @@ ParallelLoopEmitter::EmitIndexAndSetExitBasicBlock(absl::string_view loop_name,
llvm::Value* end_index = (*dynamic_loop_bounds_)[bounds_index].second;
std::unique_ptr<llvm_ir::ForLoop> loop = loop_nest.AddLoop(
- /*suffix=*/tensorflow::strings::Printf("dim.%lld", dimension),
- start_index, end_index);
+ /*suffix=*/absl::StrFormat("dim.%d", dimension), start_index,
+ end_index);
array_index[dimension] = loop->GetIndVarValue();
} else {
// Emit static loop bounds for this dimension.
std::unique_ptr<llvm_ir::ForLoop> loop = loop_nest.AddLoop(
/*start_index=*/0,
/*end_index=*/shape_.dimensions(dimension),
- /*suffix=*/tensorflow::strings::Printf("dim.%lld", dimension));
+ /*suffix=*/absl::StrFormat("dim.%d", dimension));
array_index[dimension] = loop->GetIndVarValue();
}
}
diff --git a/tensorflow/compiler/xla/service/cpu/sample_harness.cc b/tensorflow/compiler/xla/service/cpu/sample_harness.cc
index f227e4ae13..942e2ddd39 100644
--- a/tensorflow/compiler/xla/service/cpu/sample_harness.cc
+++ b/tensorflow/compiler/xla/service/cpu/sample_harness.cc
@@ -16,6 +16,7 @@ limitations under the License.
#include <memory>
#include <string>
+#include "absl/strings/str_format.h"
#include "tensorflow/compiler/xla/array4d.h"
#include "tensorflow/compiler/xla/client/client.h"
#include "tensorflow/compiler/xla/client/client_library.h"
@@ -27,7 +28,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/logging.h"
@@ -67,8 +67,8 @@ int main(int argc, char** argv) {
/*execution_profile=*/&profile);
std::unique_ptr<xla::Literal> actual = result.ConsumeValueOrDie();
- LOG(INFO) << tensorflow::strings::Printf("computation took %lldns",
- profile.compute_time_ns());
+ LOG(INFO) << absl::StrFormat("computation took %dns",
+ profile.compute_time_ns());
LOG(INFO) << actual->ToString();
return 0;
diff --git a/tensorflow/compiler/xla/service/cpu/tests/cpu_intrinsic_test.cc b/tensorflow/compiler/xla/service/cpu/tests/cpu_intrinsic_test.cc
index 9457e57d7b..a434c04a98 100644
--- a/tensorflow/compiler/xla/service/cpu/tests/cpu_intrinsic_test.cc
+++ b/tensorflow/compiler/xla/service/cpu/tests/cpu_intrinsic_test.cc
@@ -65,8 +65,8 @@ class CpuUnaryIntrinsicTest
features = "";
}
- return absl::StrCat(opcode.c_str(), "_On_", triple.c_str(),
- features.empty() ? "" : "_With", features.c_str());
+ return absl::StrCat(opcode, "_On_", triple,
+ (features.empty() ? "" : "_With"), features);
}
};
diff --git a/tensorflow/compiler/xla/service/device_memory_allocator.cc b/tensorflow/compiler/xla/service/device_memory_allocator.cc
index e228bb56bc..1d0297cfbf 100644
--- a/tensorflow/compiler/xla/service/device_memory_allocator.cc
+++ b/tensorflow/compiler/xla/service/device_memory_allocator.cc
@@ -36,9 +36,8 @@ StatusOr<OwningDeviceMemory> StreamExecutorMemoryAllocator::Allocate(
se::DeviceMemoryBase result = stream_executor->AllocateArray<uint8>(size);
if (size > 0 && result == nullptr) {
return ResourceExhausted(
- "Failed to allocate request for %s (%lluB) on device ordinal %d",
- tensorflow::strings::HumanReadableNumBytes(size).c_str(), size,
- device_ordinal);
+ "Failed to allocate request for %s (%uB) on device ordinal %d",
+ tensorflow::strings::HumanReadableNumBytes(size), size, device_ordinal);
}
return OwningDeviceMemory(result, device_ordinal, this);
}
@@ -61,12 +60,12 @@ StatusOr<se::StreamExecutor*> StreamExecutorMemoryAllocator::GetStreamExecutor(
}
if (device_ordinal >= stream_executors_.size()) {
return InvalidArgument(
- "device ordinal value (%d) >= number of devices (%zu)", device_ordinal,
+ "device ordinal value (%d) >= number of devices (%u)", device_ordinal,
stream_executors_.size());
}
if (stream_executors_[device_ordinal] == nullptr) {
return NotFound("Device %s:%d present but not supported",
- platform()->Name().c_str(), device_ordinal);
+ platform()->Name(), device_ordinal);
}
return stream_executors_[device_ordinal];
}
diff --git a/tensorflow/compiler/xla/service/dfs_hlo_visitor.cc b/tensorflow/compiler/xla/service/dfs_hlo_visitor.cc
index 2172ae0a29..3e7373adc5 100644
--- a/tensorflow/compiler/xla/service/dfs_hlo_visitor.cc
+++ b/tensorflow/compiler/xla/service/dfs_hlo_visitor.cc
@@ -28,14 +28,14 @@ template <typename HloInstructionPtr>
Status DfsHloVisitorBase<HloInstructionPtr>::HandleElementwiseUnary(
HloInstructionPtr hlo) {
return Unimplemented("DfsHloVisitor::HandleElementwiseUnary: %s",
- HloOpcodeString(hlo->opcode()).c_str());
+ HloOpcodeString(hlo->opcode()));
}
template <typename HloInstructionPtr>
Status DfsHloVisitorBase<HloInstructionPtr>::HandleElementwiseBinary(
HloInstructionPtr hlo) {
return Unimplemented("DfsHloVisitor::HandleElementwiseBinary: %s",
- HloOpcodeString(hlo->opcode()).c_str());
+ HloOpcodeString(hlo->opcode()));
}
template <typename HloInstructionPtr>
diff --git a/tensorflow/compiler/xla/service/elemental_ir_emitter.cc b/tensorflow/compiler/xla/service/elemental_ir_emitter.cc
index 26af67cc1c..2e5930fb70 100644
--- a/tensorflow/compiler/xla/service/elemental_ir_emitter.cc
+++ b/tensorflow/compiler/xla/service/elemental_ir_emitter.cc
@@ -264,8 +264,8 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitIntegerUnaryOp(
}
}
return Unimplemented("conversion from primitive type %s to %s",
- PrimitiveType_Name(from_type).c_str(),
- PrimitiveType_Name(to_type).c_str());
+ PrimitiveType_Name(from_type),
+ PrimitiveType_Name(to_type));
}
case HloOpcode::kBitcastConvert: {
PrimitiveType from_type = op->operand(0)->shape().element_type();
@@ -282,8 +282,7 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitIntegerUnaryOp(
return InvalidArgument(
"bitcast conversion from primitive type %s to %s with unequal "
"bit-widths (%u versus %u) ",
- PrimitiveType_Name(from_type).c_str(),
- PrimitiveType_Name(to_type).c_str(),
+ PrimitiveType_Name(from_type), PrimitiveType_Name(to_type),
primitive_util::BitWidth(from_type),
primitive_util::BitWidth(to_type));
}
@@ -332,7 +331,7 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitIntegerUnaryOp(
}
default:
return Unimplemented("unary integer op '%s'",
- HloOpcodeString(op->opcode()).c_str());
+ HloOpcodeString(op->opcode()));
}
}
@@ -389,8 +388,8 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitFloatUnaryOp(
operand_value, llvm_ir::PrimitiveTypeToIrType(to_type, module_));
}
return Unimplemented("unhandled conversion operation: %s => %s",
- PrimitiveType_Name(from_type).c_str(),
- PrimitiveType_Name(to_type).c_str());
+ PrimitiveType_Name(from_type),
+ PrimitiveType_Name(to_type));
}
case HloOpcode::kBitcastConvert: {
PrimitiveType from_type = op->operand(0)->shape().element_type();
@@ -407,8 +406,7 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitFloatUnaryOp(
return InvalidArgument(
"bitcast conversion from primitive type %s to %s with unequal "
"bit-widths (%u versus %u) ",
- PrimitiveType_Name(from_type).c_str(),
- PrimitiveType_Name(to_type).c_str(),
+ PrimitiveType_Name(from_type), PrimitiveType_Name(to_type),
primitive_util::BitWidth(from_type),
primitive_util::BitWidth(to_type));
}
@@ -471,7 +469,7 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitFloatUnaryOp(
return llvm::ConstantFP::get(operand_value->getType(), 0.0);
default:
return Unimplemented("unary floating-point op '%s'",
- HloOpcodeString(op->opcode()).c_str());
+ HloOpcodeString(op->opcode()));
}
}
@@ -683,7 +681,7 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitComplexUnaryOp(
return EmitExtractImag(operand_value);
default:
return Unimplemented("unary complex op '%s'",
- HloOpcodeString(op->opcode()).c_str());
+ HloOpcodeString(op->opcode()));
}
}
@@ -755,7 +753,7 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitFloatBinaryOp(
return EmitAtan2(op->shape().element_type(), lhs_value, rhs_value);
default:
return Unimplemented("binary floating point op '%s'",
- HloOpcodeString(op->opcode()).c_str());
+ HloOpcodeString(op->opcode()));
}
}
@@ -873,7 +871,7 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitComplexBinaryOp(
}
default:
return Unimplemented("binary complex op '%s'",
- HloOpcodeString(op->opcode()).c_str());
+ HloOpcodeString(op->opcode()));
}
}
@@ -1247,7 +1245,7 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitIntegerBinaryOp(
/*saturate_to_sign_bit=*/false);
default:
return Unimplemented("binary integer op '%s'",
- HloOpcodeString(op->opcode()).c_str());
+ HloOpcodeString(op->opcode()));
}
}
@@ -1378,7 +1376,7 @@ StatusOr<llvm::Value*> ElementalIrEmitter::ConvertValueForDistribution(
default:
return InvalidArgument(
"unhandled distribution %s",
- RandomDistribution_Name(hlo->random_distribution()).c_str());
+ RandomDistribution_Name(hlo->random_distribution()));
}
}
@@ -1610,7 +1608,7 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitElementalClamp(
max_value, EmitIntegralMax(min_value, arg_value, is_signed), is_signed);
} else {
return Unimplemented("Clamp unimplemented for %s",
- PrimitiveType_Name(prim_type).c_str());
+ PrimitiveType_Name(prim_type));
}
}
@@ -2232,7 +2230,7 @@ llvm_ir::ElementGenerator ElementalIrEmitter::MakeElementGenerator(
default:
return [hlo](const IrArray::Index& index) {
return Unimplemented("Unhandled opcode for elemental IR emission: %s",
- HloOpcodeString(hlo->opcode()).c_str());
+ HloOpcodeString(hlo->opcode()));
};
}
}
diff --git a/tensorflow/compiler/xla/service/executable.cc b/tensorflow/compiler/xla/service/executable.cc
index 1c9f396b68..78edf918a4 100644
--- a/tensorflow/compiler/xla/service/executable.cc
+++ b/tensorflow/compiler/xla/service/executable.cc
@@ -16,6 +16,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/executable.h"
#include "absl/memory/memory.h"
+#include "absl/strings/str_format.h"
#include "tensorflow/compiler/xla/legacy_flags/debug_options_flags.h"
#include "tensorflow/compiler/xla/service/hlo_graph_dumper.h"
#include "tensorflow/compiler/xla/status.h"
@@ -23,7 +24,6 @@ limitations under the License.
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/proto_serialization.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/env.h"
using tensorflow::gtl::ArraySlice;
@@ -155,9 +155,9 @@ Status Executable::DumpHloSnapshot() {
const string& directory_path =
module_config().debug_options().xla_dump_executions_to();
const auto& module = hlo_snapshot_->hlo().hlo_module();
- string filename = tensorflow::strings::Printf(
- "computation_%lld__%s__execution_%lld", module.id(),
- module.entry_computation_name().c_str(), ++execution_count_);
+ string filename =
+ absl::StrFormat("computation_%d__%s__execution_%d", module.id(),
+ module.entry_computation_name(), ++execution_count_);
return Executable::DumpToDirectory(directory_path, filename, *hlo_snapshot_);
}
diff --git a/tensorflow/compiler/xla/service/execution_tracker.cc b/tensorflow/compiler/xla/service/execution_tracker.cc
index 70a78c8a2b..997db7c058 100644
--- a/tensorflow/compiler/xla/service/execution_tracker.cc
+++ b/tensorflow/compiler/xla/service/execution_tracker.cc
@@ -66,7 +66,7 @@ Status ExecutionTracker::Unregister(const ExecutionHandle& handle) {
tensorflow::mutex_lock lock(execution_mutex_);
auto it = handle_to_execution_.find(handle.handle());
if (it == handle_to_execution_.end()) {
- return NotFound("no execution record for execution handle: %lld",
+ return NotFound("no execution record for execution handle: %d",
handle.handle());
}
handle_to_execution_.erase(handle.handle());
@@ -78,7 +78,7 @@ StatusOr<const AsyncExecution*> ExecutionTracker::Resolve(
tensorflow::mutex_lock lock(execution_mutex_);
auto it = handle_to_execution_.find(handle.handle());
if (it == handle_to_execution_.end()) {
- return NotFound("no execution record for execution handle: %lld",
+ return NotFound("no execution record for execution handle: %d",
handle.handle());
}
return it->second.get();
diff --git a/tensorflow/compiler/xla/service/gather_expander.cc b/tensorflow/compiler/xla/service/gather_expander.cc
index d889fd8e88..3f1a881372 100644
--- a/tensorflow/compiler/xla/service/gather_expander.cc
+++ b/tensorflow/compiler/xla/service/gather_expander.cc
@@ -323,7 +323,7 @@ StatusOr<HloInstruction*> GatherExpander::ExpandGather(
return Unimplemented(
"Gather operations with more than 2147483647 gather indices are not "
"supported. This error occurred for %s.",
- gather_instr->ToString().c_str());
+ gather_instr->ToString());
}
TF_ASSIGN_OR_RETURN(
diff --git a/tensorflow/compiler/xla/service/gpu/BUILD b/tensorflow/compiler/xla/service/gpu/BUILD
index e53f525517..87b799e78e 100644
--- a/tensorflow/compiler/xla/service/gpu/BUILD
+++ b/tensorflow/compiler/xla/service/gpu/BUILD
@@ -57,6 +57,7 @@ cc_library(
"//tensorflow/core:lib",
"//tensorflow/core:stream_executor_no_cuda",
"@com_google_absl//absl/memory",
+ "@com_google_absl//absl/strings:str_format",
],
)
@@ -110,6 +111,7 @@ tf_cc_test(
"//tensorflow/compiler/xla/tests:xla_internal_test_main",
"//tensorflow/core:lib",
"@com_google_absl//absl/memory",
+ "@com_google_absl//absl/strings:str_format",
],
)
@@ -351,6 +353,7 @@ cc_library(
"//tensorflow/stream_executor",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/strings",
+ "@com_google_absl//absl/strings:str_format",
"@com_google_absl//absl/types:optional",
],
)
@@ -389,6 +392,7 @@ cc_library(
"//tensorflow/core:lib",
"//tensorflow/core:stream_executor_no_cuda",
"@com_google_absl//absl/strings",
+ "@com_google_absl//absl/strings:str_format",
"@com_google_absl//absl/types:optional",
],
)
@@ -819,6 +823,7 @@ tf_cc_test(
"//tensorflow/compiler/xla/tests:hlo_test_base",
"//tensorflow/compiler/xla/tests:xla_internal_test_main",
"@com_google_absl//absl/memory",
+ "@com_google_absl//absl/strings:str_format",
],
)
diff --git a/tensorflow/compiler/xla/service/gpu/buffer_allocations.cc b/tensorflow/compiler/xla/service/gpu/buffer_allocations.cc
index e208ad61e3..86af83b6b9 100644
--- a/tensorflow/compiler/xla/service/gpu/buffer_allocations.cc
+++ b/tensorflow/compiler/xla/service/gpu/buffer_allocations.cc
@@ -62,7 +62,7 @@ StatusOr<std::unique_ptr<BufferAllocations>> BufferAllocations::Builder::Build(
if (reinterpret_cast<uintptr_t>(address.opaque()) % expected_alignment !=
0) {
return InternalError(
- "Address of registered buffer %lld must be a multiple of %llx, but "
+ "Address of registered buffer %d must be a multiple of %x, but "
"was %p",
i, kEntryParameterAlignBytes, address.opaque());
}
@@ -83,7 +83,7 @@ StatusOr<std::unique_ptr<BufferAllocations>> BufferAllocations::Builder::Build(
0) {
return InternalError(
"Address returned by memory_allocator->Allocate must be a "
- "multiple of %llx, but was %p",
+ "multiple of %x, but was %p",
kXlaAllocatedBufferAlignBytes, buffer.opaque());
}
// We do manual memory management within BufferAllocations. Be sure not
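As the hunk above suggests, the hex and pointer conversions carry over as well: %x formats any integral argument (replacing %llx), while %p still expects a pointer. A hedged sketch with illustrative names:

#include <cstdint>
#include <string>

#include "absl/strings/str_format.h"

std::string DescribeMisalignedBuffer(int64_t buffer_index, int64_t alignment,
                                     const void* address) {
  // alignment is printed in hex via %x; the raw address via %p.
  return absl::StrFormat(
      "Address of registered buffer %d must be a multiple of %x, but was %p",
      buffer_index, alignment, address);
}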
diff --git a/tensorflow/compiler/xla/service/gpu/buffer_comparator.cc b/tensorflow/compiler/xla/service/gpu/buffer_comparator.cc
index f22c2a8add..13c83c9199 100644
--- a/tensorflow/compiler/xla/service/gpu/buffer_comparator.cc
+++ b/tensorflow/compiler/xla/service/gpu/buffer_comparator.cc
@@ -124,7 +124,7 @@ StatusOr<F16BufferComparator> F16BufferComparator::Create(
StatusOr<bool> F16BufferComparator::CompareEqualImpl(
se::DeviceMemory<Eigen::half> test_buffer) {
if (ref_buffer_.root_buffer().size() != test_buffer.size()) {
- return InternalError("Mismatched buffer size: %lld vs %lld",
+ return InternalError("Mismatched buffer size: %d vs %d",
ref_buffer_.root_buffer().size(), test_buffer.size());
}
diff --git a/tensorflow/compiler/xla/service/gpu/conditional_thunk.cc b/tensorflow/compiler/xla/service/gpu/conditional_thunk.cc
index 8b0426aa27..9ed523998b 100644
--- a/tensorflow/compiler/xla/service/gpu/conditional_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/conditional_thunk.cc
@@ -59,7 +59,7 @@ Status ConditionalThunk::ExecuteOnStream(
Status block_status = stream->BlockHostUntilDone();
if (!block_status.ok()) {
return InternalError("Failed to retrieve predicate value on stream %p: %s.",
- stream, block_status.error_message().c_str());
+ stream, block_status.error_message());
}
// Execute the true or the false computation depending on the value of the
diff --git a/tensorflow/compiler/xla/service/gpu/convolution_thunk.cc b/tensorflow/compiler/xla/service/gpu/convolution_thunk.cc
index 854a2f50b2..eea31f3de1 100644
--- a/tensorflow/compiler/xla/service/gpu/convolution_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/convolution_thunk.cc
@@ -22,7 +22,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/util.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
diff --git a/tensorflow/compiler/xla/service/gpu/cudnn_batchnorm_thunk.cc b/tensorflow/compiler/xla/service/gpu/cudnn_batchnorm_thunk.cc
index 18a76e8c26..bc3c6f72f6 100644
--- a/tensorflow/compiler/xla/service/gpu/cudnn_batchnorm_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/cudnn_batchnorm_thunk.cc
@@ -22,7 +22,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/ir_emission_utils.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/util.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
diff --git a/tensorflow/compiler/xla/service/gpu/cudnn_convolution_algorithm_picker.cc b/tensorflow/compiler/xla/service/gpu/cudnn_convolution_algorithm_picker.cc
index 3d421ebb69..dbdf8e7a0e 100644
--- a/tensorflow/compiler/xla/service/gpu/cudnn_convolution_algorithm_picker.cc
+++ b/tensorflow/compiler/xla/service/gpu/cudnn_convolution_algorithm_picker.cc
@@ -15,6 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/cudnn_convolution_algorithm_picker.h"
#include "absl/strings/str_cat.h"
+#include "absl/strings/str_format.h"
#include "absl/types/optional.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/gpu/backend_configs.pb.h"
@@ -59,8 +60,8 @@ StatusOr<se::DeviceMemory<uint8>> ScratchAllocator::AllocateBytes(
if (byte_size > GetMemoryLimitInBytes(stream)) {
return se::port::Status(
se::port::error::RESOURCE_EXHAUSTED,
- tensorflow::strings::Printf(
- "Allocating %lld bytes exceeds the memory limit of %lld bytes.",
+ absl::StrFormat(
+ "Allocating %d bytes exceeds the memory limit of %d bytes.",
byte_size, GetMemoryLimitInBytes(stream)));
}
@@ -361,7 +362,7 @@ CudnnConvolutionAlgorithmPicker::PickBestAlgorithm(
return InternalError(
"All algorithms tried for convolution %s failed. Falling back to "
"default algorithm.",
- instr->ToString().c_str());
+ instr->ToString());
}
StatusOr<bool> CudnnConvolutionAlgorithmPicker::RunOnInstruction(
diff --git a/tensorflow/compiler/xla/service/gpu/cudnn_convolution_runner.cc b/tensorflow/compiler/xla/service/gpu/cudnn_convolution_runner.cc
index 68086c86e9..07b96fbd3f 100644
--- a/tensorflow/compiler/xla/service/gpu/cudnn_convolution_runner.cc
+++ b/tensorflow/compiler/xla/service/gpu/cudnn_convolution_runner.cc
@@ -197,8 +197,8 @@ Status RunCudnnConvolution(
if (!stream->ok()) {
return InternalError(
- "Unable to launch convolution with type %s and algorithm (%lld, %lld)",
- CudnnConvKindToString(kind).c_str(), algorithm.algorithm().algo_id(),
+ "Unable to launch convolution with type %s and algorithm (%d, %d)",
+ CudnnConvKindToString(kind), algorithm.algorithm().algo_id(),
algorithm.algorithm_no_scratch().algo_id());
}
return Status::OK();
diff --git a/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.cc b/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.cc
index 2460d951bd..afcf9fa2ea 100644
--- a/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.cc
+++ b/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.cc
@@ -107,7 +107,7 @@ StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitLibdeviceMathCall(
break;
default:
return Unimplemented("Bad type for libdevice math call: %s",
- PrimitiveType_Name(output_type).c_str());
+ PrimitiveType_Name(output_type));
}
llvm::Value* result = EmitMathCall(munged_callee, converted_operands,
converted_input_types, output_type)
@@ -138,7 +138,7 @@ StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitLlvmIntrinsicMathCall(
break;
default:
return Unimplemented("Bad type for llvm intrinsic math call: %s",
- PrimitiveType_Name(output_type).c_str());
+ PrimitiveType_Name(output_type));
}
return EmitMathCall(munged_callee, operands, input_types, output_type);
}
@@ -152,8 +152,8 @@ StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitMathCall(
for (PrimitiveType input_type : input_types) {
if (output_type != input_type) {
return Unimplemented("Input type ≠ output type: %s ≠ %s",
- PrimitiveType_Name(input_type).c_str(),
- PrimitiveType_Name(output_type).c_str());
+ PrimitiveType_Name(input_type),
+ PrimitiveType_Name(output_type));
}
}
diff --git a/tensorflow/compiler/xla/service/gpu/fft_thunk.cc b/tensorflow/compiler/xla/service/gpu/fft_thunk.cc
index def595d217..11549cdac5 100644
--- a/tensorflow/compiler/xla/service/gpu/fft_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/fft_thunk.cc
@@ -18,10 +18,10 @@ limitations under the License.
#include <string>
#include "absl/strings/str_cat.h"
+#include "absl/strings/str_format.h"
#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/util.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
@@ -43,8 +43,8 @@ StatusOr<se::DeviceMemory<uint8>> FftScratchAllocator::AllocateBytes(
if (byte_size > GetMemoryLimitInBytes(stream)) {
return se::port::Status(
se::port::error::RESOURCE_EXHAUSTED,
- tensorflow::strings::Printf(
- "Allocating %lld bytes exceeds the memory limit of %lld bytes.",
+ absl::StrFormat(
+ "Allocating %d bytes exceeds the memory limit of %d bytes.",
byte_size, GetMemoryLimitInBytes(stream)));
}
@@ -213,7 +213,7 @@ Status FftThunk::ExecuteOnStream(const BufferAllocations& buffer_allocations,
return Status::OK();
}
return InternalError("Unable to launch fft for thunk %p with type %s", this,
- FftTypeToString(fft_type_).c_str());
+ FftTypeToString(fft_type_));
}
} // namespace gpu
diff --git a/tensorflow/compiler/xla/service/gpu/gemm_thunk.cc b/tensorflow/compiler/xla/service/gpu/gemm_thunk.cc
index 2c02ec2584..9c4a490366 100644
--- a/tensorflow/compiler/xla/service/gpu/gemm_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/gemm_thunk.cc
@@ -186,7 +186,7 @@ StatusOr<se::blas::AlgorithmType> DoGemmAutotune(
}
return InternalError(
- "Unable to autotune cuBLAS gemm on stream %p; none of the %zu algorithms "
+ "Unable to autotune cuBLAS gemm on stream %p; none of the %u algorithms "
"ran successfully",
stream, algorithms.size());
}
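The %zu qualifier disappears for the same reason: %u, like %d, accepts any unsigned integral argument, including size_t. An illustrative sketch, not the thunk's actual code:

#include <string>
#include <vector>

#include "absl/strings/str_format.h"

std::string DescribeAutotuneFailure(const void* stream,
                                    const std::vector<int>& algorithms) {
  return absl::StrFormat(
      "Unable to autotune gemm on stream %p; none of the %u algorithms ran "
      "successfully",
      stream, algorithms.size());
}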
diff --git a/tensorflow/compiler/xla/service/gpu/gpu_executable.cc b/tensorflow/compiler/xla/service/gpu/gpu_executable.cc
index 88be63e267..71a02e70df 100644
--- a/tensorflow/compiler/xla/service/gpu/gpu_executable.cc
+++ b/tensorflow/compiler/xla/service/gpu/gpu_executable.cc
@@ -160,7 +160,7 @@ Status GpuExecutable::ExecuteThunks(
if (!block_status.ok()) {
return InternalError(
"Failed to complete all kernels launched on stream %p: %s",
- main_stream, block_status.error_message().c_str());
+ main_stream, block_status.error_message());
}
}
@@ -260,10 +260,9 @@ StatusOr<ScopedShapedBuffer> GpuExecutable::ExecuteOnStream(
if (buffer.is_null() && buffer.size() > 0) {
return FailedPrecondition(
"Cannot run XLA computation because pointer to (sub-)buffer at "
- "index %s of parameter %lld was null. All pointers to "
- "(sub-)buffers must not be null, unless the (sub-)buffer has zero "
- "elements.",
- allocation.param_shape_index().ToString().c_str(), param_no);
+ "index %s of parameter %d was null. All pointers to (sub-)buffers "
+ "must not be null, unless the (sub-)buffer has zero elements.",
+ allocation.param_shape_index().ToString(), param_no);
}
buffer_allocations_builder.RegisterBuffer(i, buffer);
diff --git a/tensorflow/compiler/xla/service/gpu/gpu_hlo_support_checker.cc b/tensorflow/compiler/xla/service/gpu/gpu_hlo_support_checker.cc
index 4944c41f7d..4268fb2c7a 100644
--- a/tensorflow/compiler/xla/service/gpu/gpu_hlo_support_checker.cc
+++ b/tensorflow/compiler/xla/service/gpu/gpu_hlo_support_checker.cc
@@ -34,9 +34,8 @@ StatusOr<bool> GpuHloSupportChecker::Run(HloModule* module) {
return xla::Unimplemented(
"GPU backend does not support HLO instruction %s with shape "
"containing a sparse layout: %s",
- instruction->ToString().c_str(),
- ShapeUtil::HumanStringWithLayout(instruction->shape())
- .c_str());
+ instruction->ToString(),
+ ShapeUtil::HumanStringWithLayout(instruction->shape()));
}
return Status::OK();
}));
diff --git a/tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.cc b/tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.cc
index 44303724bb..f3c2744292 100644
--- a/tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.cc
+++ b/tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.cc
@@ -84,7 +84,7 @@ Status GpuTransferManager::EnqueueBuffersToInfeed(
Status block_status = stream->BlockHostUntilDone();
if (!block_status.ok()) {
return InternalError("Failed to complete data transfer on stream %p: %s",
- stream, block_status.error_message().c_str());
+ stream, block_status.error_message());
}
infeed_manager->EnqueueDestination(std::move(buffers));
@@ -97,7 +97,7 @@ Status GpuTransferManager::EnqueueBuffersToInfeed(
StatusOr<InfeedBuffer> GpuTransferManager::TransferBufferToInfeedInternal(
se::StreamExecutor* executor, int64 size, const void* source) {
if (size > std::numeric_limits<int32>::max()) {
- return InvalidArgument("Infeed shape is too large: needs %lld bytes", size);
+ return InvalidArgument("Infeed shape is too large: needs %d bytes", size);
}
if (size == 0) {
diff --git a/tensorflow/compiler/xla/service/gpu/hlo_schedule_test.cc b/tensorflow/compiler/xla/service/gpu/hlo_schedule_test.cc
index d4a96cd5b3..bb147c8d98 100644
--- a/tensorflow/compiler/xla/service/gpu/hlo_schedule_test.cc
+++ b/tensorflow/compiler/xla/service/gpu/hlo_schedule_test.cc
@@ -19,6 +19,7 @@ limitations under the License.
#include <unordered_set>
#include "absl/memory/memory.h"
+#include "absl/strings/str_format.h"
#include "tensorflow/compiler/xla/service/gpu/stream_assignment.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
@@ -266,7 +267,7 @@ TEST_F(HloScheduleTest, LatticeMatMul) {
params.reserve(6);
for (int i = 0; i < 6; ++i) {
params.push_back(builder.AddInstruction(HloInstruction::CreateParameter(
- i, f32_2x2_, /*name=*/tensorflow::strings::Printf("param%d", i))));
+ i, f32_2x2_, /*name=*/absl::StrFormat("param%d", i))));
}
HloInstruction* d00 = builder.AddInstruction(
HloInstruction::CreateCanonicalDot(f32_2x2_, params[2], params[3]));
diff --git a/tensorflow/compiler/xla/service/gpu/infeed_thunk.cc b/tensorflow/compiler/xla/service/gpu/infeed_thunk.cc
index fee6d2af3b..8c3a026740 100644
--- a/tensorflow/compiler/xla/service/gpu/infeed_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/infeed_thunk.cc
@@ -96,7 +96,7 @@ Status InfeedThunk::ExecuteOnStream(const BufferAllocations& buffer_allocations,
Status block_status = stream->BlockHostUntilDone();
if (!block_status.ok()) {
return InternalError("Failed to complete data transfer on stream %p: %s",
- stream, block_status.error_message().c_str());
+ stream, block_status.error_message());
}
VLOG(2) << "Infeeding to GPU complete";
diff --git a/tensorflow/compiler/xla/service/gpu/instruction_fusion_test.cc b/tensorflow/compiler/xla/service/gpu/instruction_fusion_test.cc
index 8d0522bd8f..f53dfaee3d 100644
--- a/tensorflow/compiler/xla/service/gpu/instruction_fusion_test.cc
+++ b/tensorflow/compiler/xla/service/gpu/instruction_fusion_test.cc
@@ -365,7 +365,7 @@ static StatusOr<const HloInstruction*> FindHloInstruction(
}
return NotFound(
"Computation '%s' does not contain an instruction with op code '%s'.",
- computation.name().c_str(), HloOpcodeString(op).c_str());
+ computation.name(), HloOpcodeString(op));
}
TEST_F(InstructionFusionTest, MultiOutputFusion) {
diff --git a/tensorflow/compiler/xla/service/gpu/ir_emitter.cc b/tensorflow/compiler/xla/service/gpu/ir_emitter.cc
index 7111b53944..4cbb6d75a8 100644
--- a/tensorflow/compiler/xla/service/gpu/ir_emitter.cc
+++ b/tensorflow/compiler/xla/service/gpu/ir_emitter.cc
@@ -384,8 +384,8 @@ Status IrEmitter::EmitAtomicOperationForNestedComputation(
// TODO(b/30258929): We only accept binary computations so far.
return Unimplemented(
"We only support atomic functions with exactly two parameters, but "
- "computation %s has %lld.",
- computation.name().c_str(), computation.num_parameters());
+ "computation %s has %d.",
+ computation.name(), computation.num_parameters());
}
if (MaybeEmitDirectAtomicOperation(computation, output_address,
diff --git a/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc b/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc
index 9c7b508e10..4d98955c58 100644
--- a/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc
+++ b/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc
@@ -2674,8 +2674,7 @@ Status CheckHloBuffersShareAllocation(
if (slice_a != slice_b) {
return InternalError(
"instruction %s %s does not share allocation with instruction %s %s",
- a->ToString().c_str(), slice_a.ToString().c_str(),
- b->ToString().c_str(), slice_b.ToString().c_str());
+ a->ToString(), slice_a.ToString(), b->ToString(), slice_b.ToString());
}
return Status::OK();
}
diff --git a/tensorflow/compiler/xla/service/gpu/kernel_thunk.cc b/tensorflow/compiler/xla/service/gpu/kernel_thunk.cc
index d856299889..3259eaa2a2 100644
--- a/tensorflow/compiler/xla/service/gpu/kernel_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/kernel_thunk.cc
@@ -63,7 +63,7 @@ Status KernelThunk::Initialize(const GpuExecutable& executable,
if (kernel_cache_.end() == it) {
it = kernel_cache_.emplace(executor, se::KernelBase(executor)).first;
if (!executor->GetKernel(*loader_spec_, &it->second)) {
- return InternalError("Unable to load kernel %s", kernel_name_.c_str());
+ return InternalError("Unable to load kernel %s", kernel_name_);
}
}
@@ -107,7 +107,7 @@ Status KernelThunk::ExecuteOnStream(const BufferAllocations& buffer_allocations,
stream, se::ThreadDim(launch_dimensions.threads_per_block()),
se::BlockDim(launch_dimensions.block_count()), *kernel,
*kernel_args)) {
- return InternalError("Unable to launch kernel %s", kernel_name_.c_str());
+ return InternalError("Unable to launch kernel %s", kernel_name_);
}
return Status::OK();
}
diff --git a/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/BUILD b/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/BUILD
index ccf082c4c6..698d2d51cc 100644
--- a/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/BUILD
+++ b/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/BUILD
@@ -36,6 +36,7 @@ cc_library(
"//tensorflow/core:lib_internal",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/strings",
+ "@com_google_absl//absl/strings:str_format",
"@llvm//:amdgpu_code_gen",
"@llvm//:analysis",
"@llvm//:bit_reader",
diff --git a/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/dump_ir_pass.cc b/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/dump_ir_pass.cc
index a3c74507dd..85bc58cb44 100644
--- a/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/dump_ir_pass.cc
+++ b/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/dump_ir_pass.cc
@@ -15,6 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/dump_ir_pass.h"
+#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/FileSystem.h"
@@ -22,7 +23,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/utils.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/core/lib/io/path.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"
namespace xla {
@@ -87,9 +87,10 @@ void IrDumpingPassManager::run(llvm::Module &module) {
llvm::PassRegistry::getPassRegistry()->getPassInfo(P->getPassID());
const string basename = ReplaceFilenameExtension(
absl::string_view(tensorflow::io::Basename(input_filename_)),
- tensorflow::strings::Printf(
+ absl::StrFormat(
"pass-%02d.before.%s.ll", i,
- (PI == nullptr ? "unknown" : PI->getPassArgument().data())));
+ absl::string_view(PI == nullptr ? "unknown"
+ : PI->getPassArgument().data())));
llvm::legacy::PassManager::add(
new DumpIrPass(tensorflow::io::JoinPath(output_dir_, basename)));
}
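The absl::string_view wrapper above works because %s accepts const char*, std::string, and absl::string_view interchangeably; the cast only gives the two ternary branches a common type. A small sketch with illustrative names:

#include <string>

#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"

std::string PassDumpBasename(int pass_number, absl::string_view pass_name) {
  // Width and zero-padding flags such as %02d behave as they did with Printf.
  return absl::StrFormat("pass-%02d.before.%s.ll", pass_number,
                         pass_name.empty() ? absl::string_view("unknown")
                                           : pass_name);
}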
diff --git a/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/nvptx_backend_lib.cc b/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/nvptx_backend_lib.cc
index e18d7e764a..8751e3a9c2 100644
--- a/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/nvptx_backend_lib.cc
+++ b/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/nvptx_backend_lib.cc
@@ -57,7 +57,6 @@ limitations under the License.
#include "llvm/Transforms/Scalar.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/core/lib/io/path.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/tracing.h"
diff --git a/tensorflow/compiler/xla/service/gpu/outfeed_thunk.cc b/tensorflow/compiler/xla/service/gpu/outfeed_thunk.cc
index b99d998c4d..e0f3e84a4c 100644
--- a/tensorflow/compiler/xla/service/gpu/outfeed_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/outfeed_thunk.cc
@@ -96,7 +96,7 @@ Status OutfeedThunk::ExecuteOnStream(
Status block_status = stream->BlockHostUntilDone();
if (!block_status.ok()) {
return InternalError("Failed to complete data transfer on stream %p: %s",
- stream, block_status.error_message().c_str());
+ stream, block_status.error_message());
}
VLOG(2) << "Outfeeding from GPU complete";
diff --git a/tensorflow/compiler/xla/service/gpu/partition_assignment.cc b/tensorflow/compiler/xla/service/gpu/partition_assignment.cc
index c927c5ee16..cf9f102d31 100644
--- a/tensorflow/compiler/xla/service/gpu/partition_assignment.cc
+++ b/tensorflow/compiler/xla/service/gpu/partition_assignment.cc
@@ -19,6 +19,7 @@ limitations under the License.
#include <string>
#include "absl/memory/memory.h"
+#include "absl/strings/str_format.h"
#include "tensorflow/compiler/xla/map_util.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
@@ -26,7 +27,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/core/bits.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"
namespace xla {
@@ -34,9 +34,8 @@ namespace gpu {
std::ostream& operator<<(std::ostream& out,
const LaunchDimensions& launch_dims) {
- out << tensorflow::strings::Printf("[block: %lld, thread: %lld]",
- launch_dims.block_count(),
- launch_dims.threads_per_block());
+ out << absl::StrFormat("[block: %d, thread: %d]", launch_dims.block_count(),
+ launch_dims.threads_per_block());
return out;
}
@@ -91,9 +90,9 @@ LaunchDimensions CalculateLaunchDimensions(
}
int64 block_count = CeilOfRatio(num_elements, threads_per_block);
- VLOG(2) << tensorflow::strings::Printf(
+ VLOG(2) << absl::StrFormat(
"Initialized the block count to ceil(# of elements / threads per "
- "block) = ceil(%lld/%lld) = %lld",
+ "block) = ceil(%d/%d) = %d",
num_elements, threads_per_block, block_count);
return LaunchDimensions(block_count, threads_per_block);
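Because StrFormat returns a std::string, its result streams directly into an std::ostream or a VLOG statement with no intermediate buffer. A minimal sketch:

#include <cstdint>
#include <ostream>
#include <string>

#include "absl/strings/str_format.h"

void PrintLaunchDimensions(std::ostream& out, int64_t block_count,
                           int64_t threads_per_block) {
  out << absl::StrFormat("[block: %d, thread: %d]", block_count,
                         threads_per_block);
}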
diff --git a/tensorflow/compiler/xla/service/gpu/stream_assignment_test.cc b/tensorflow/compiler/xla/service/gpu/stream_assignment_test.cc
index 3f75d8b559..091aca23e5 100644
--- a/tensorflow/compiler/xla/service/gpu/stream_assignment_test.cc
+++ b/tensorflow/compiler/xla/service/gpu/stream_assignment_test.cc
@@ -16,13 +16,13 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/stream_assignment.h"
#include "absl/memory/memory.h"
+#include "absl/strings/str_format.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
#include "tensorflow/compiler/xla/test_helpers.h"
#include "tensorflow/compiler/xla/tests/hlo_test_base.h"
#include "tensorflow/compiler/xla/types.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
namespace xla {
namespace gpu {
@@ -98,7 +98,7 @@ TEST_F(StreamAssignmentTest, LatticeMatMul) {
params.reserve(6);
for (int i = 0; i < 6; ++i) {
params.push_back(builder.AddInstruction(HloInstruction::CreateParameter(
- i, f32_2x2_, /*name=*/tensorflow::strings::Printf("param%d", i))));
+ i, f32_2x2_, /*name=*/absl::StrFormat("param%d", i))));
}
HloInstruction* d00 = builder.AddInstruction(
HloInstruction::CreateCanonicalDot(f32_2x2_, params[2], params[3]));
diff --git a/tensorflow/compiler/xla/service/gpu/while_thunk.cc b/tensorflow/compiler/xla/service/gpu/while_thunk.cc
index 828fc2884b..c4754fe378 100644
--- a/tensorflow/compiler/xla/service/gpu/while_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/while_thunk.cc
@@ -70,7 +70,7 @@ Status WhileThunk::ExecuteOnStream(const BufferAllocations& buffer_allocations,
if (!block_status.ok()) {
return InternalError(
"Failed to complete all kernels launched on stream %p: %s", stream,
- block_status.error_message().c_str());
+ block_status.error_message());
}
if (!condition_result) {
diff --git a/tensorflow/compiler/xla/service/hlo_computation.cc b/tensorflow/compiler/xla/service/hlo_computation.cc
index cf95b112d7..4a59380ed9 100644
--- a/tensorflow/compiler/xla/service/hlo_computation.cc
+++ b/tensorflow/compiler/xla/service/hlo_computation.cc
@@ -625,16 +625,15 @@ StatusOr<HloInstruction*> HloComputation::DeepCopyInstruction(
if (instruction->parent() != this) {
return FailedPrecondition(
"Can't deep copy instruction %s: instruction is not in computation %s",
- instruction->name().c_str(), name().c_str());
+ instruction->name(), name());
}
if (indices_to_copy != nullptr &&
!ShapeUtil::Compatible(instruction->shape(), indices_to_copy->shape())) {
return FailedPrecondition(
"Can't deep copy instruction %s: given shape tree of indices to copy "
"has incompatible shapes: %s vs. %s",
- instruction->name().c_str(),
- ShapeUtil::HumanString(instruction->shape()).c_str(),
- ShapeUtil::HumanString(indices_to_copy->shape()).c_str());
+ instruction->name(), ShapeUtil::HumanString(instruction->shape()),
+ ShapeUtil::HumanString(indices_to_copy->shape()));
}
ShapeIndex index;
@@ -664,7 +663,7 @@ StatusOr<HloInstruction*> HloComputation::DeepCopyInstructionWithCustomCopier(
if (instruction->parent() != this) {
return FailedPrecondition(
"Can't deep copy instruction %s: instruction is not in computation %s",
- instruction->name().c_str(), name().c_str());
+ instruction->name(), name());
}
ShapeIndex index;
return DeepCopyHelper(instruction, &index, copy_leaf);
diff --git a/tensorflow/compiler/xla/service/hlo_dataflow_analysis.cc b/tensorflow/compiler/xla/service/hlo_dataflow_analysis.cc
index 1d35757b42..3376d170e6 100644
--- a/tensorflow/compiler/xla/service/hlo_dataflow_analysis.cc
+++ b/tensorflow/compiler/xla/service/hlo_dataflow_analysis.cc
@@ -837,7 +837,7 @@ Status HloDataflowAnalysis::InitializeInstructionValueSets() {
return Unimplemented(
"Computation %s is called in both a parallel (eg, kMap) and "
"sequential (eg, kCall) context",
- computation->name().c_str());
+ computation->name());
}
if (call_graph_node.caller_callsites().empty() ||
call_graph_node.context() == CallContext::kParallel) {
diff --git a/tensorflow/compiler/xla/service/hlo_evaluator.cc b/tensorflow/compiler/xla/service/hlo_evaluator.cc
index ca1c4dd0e9..71f91fde93 100644
--- a/tensorflow/compiler/xla/service/hlo_evaluator.cc
+++ b/tensorflow/compiler/xla/service/hlo_evaluator.cc
@@ -435,7 +435,7 @@ Status HloEvaluator::HandleIsFinite(HloInstruction* is_finite) {
if (!ShapeUtil::ElementIsFloating(operand->shape())) {
return InvalidArgument(
"expected element type in shape to be float for IsFinite op, got: %s",
- PrimitiveType_Name(operand->shape().element_type()).c_str());
+ PrimitiveType_Name(operand->shape().element_type()));
}
switch (operand->shape().element_type()) {
@@ -476,9 +476,9 @@ Status HloEvaluator::HandleCompare(HloInstruction* compare) {
return Unimplemented(
"Implicit broadcasting is currently unsupported in HLO evaluator "
"Shape Mismatch: %s vs %s vs %s",
- ShapeUtil::HumanString(compare->shape()).c_str(),
- ShapeUtil::HumanString(lhs->shape()).c_str(),
- ShapeUtil::HumanString(rhs->shape()).c_str());
+ ShapeUtil::HumanString(compare->shape()),
+ ShapeUtil::HumanString(lhs->shape()),
+ ShapeUtil::HumanString(rhs->shape()));
}
TF_RET_CHECK(lhs->shape().element_type() == rhs->shape().element_type());
@@ -1105,8 +1105,8 @@ Status HloEvaluator::HandleWhile(HloInstruction* while_hlo) {
HloEvaluator loop_body_evaluator(max_loop_iterations_);
while (keep_going) {
if (max_loop_iterations_ >= 0 && iteration_count++ > max_loop_iterations_) {
- return InvalidArgument("Loop %s exceeded loop iteration limit (%lld).",
- while_hlo->name().c_str(), max_loop_iterations_);
+ return InvalidArgument("Loop %s exceeded loop iteration limit (%d).",
+ while_hlo->name(), max_loop_iterations_);
}
TF_ASSIGN_OR_RETURN(auto cond_val, cond_evaluator.Evaluate<Literal*>(
*cond_comp, {lcv.get()}));
@@ -1262,7 +1262,7 @@ Status HloEvaluator::HandleSort(HloInstruction* sort) {
const int64 rank = ShapeUtil::Rank(sort->operand(0)->shape());
if (sort_dim != rank - 1) {
return Unimplemented(
- "Trying to support along dimension %lld, which is not the last "
+ "Trying to support along dimension %d, which is not the last "
"dimension",
sort_dim);
}
diff --git a/tensorflow/compiler/xla/service/hlo_evaluator.h b/tensorflow/compiler/xla/service/hlo_evaluator.h
index 7588916de5..0ea7089552 100644
--- a/tensorflow/compiler/xla/service/hlo_evaluator.h
+++ b/tensorflow/compiler/xla/service/hlo_evaluator.h
@@ -222,8 +222,8 @@ class HloEvaluator : public DfsHloVisitorWithDefault {
return Unimplemented(
"Implicit broadcasting is currently unsupported in HLO evaluator "
"Shape Mismatch: %s vs %s",
- ShapeUtil::HumanString(shape).c_str(),
- ShapeUtil::HumanString(operand->shape()).c_str());
+ ShapeUtil::HumanString(shape),
+ ShapeUtil::HumanString(operand->shape()));
}
auto result = absl::make_unique<Literal>(shape);
diff --git a/tensorflow/compiler/xla/service/hlo_evaluator_typed_visitor.h b/tensorflow/compiler/xla/service/hlo_evaluator_typed_visitor.h
index 2da2cc2d71..b6566ebefe 100644
--- a/tensorflow/compiler/xla/service/hlo_evaluator_typed_visitor.h
+++ b/tensorflow/compiler/xla/service/hlo_evaluator_typed_visitor.h
@@ -143,7 +143,7 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
Status DefaultAction(HloInstruction* hlo_instruction) override {
return Unimplemented("unhandled HLO ops for HloEvaluator: %s.",
- HloOpcodeString(hlo_instruction->opcode()).c_str());
+ HloOpcodeString(hlo_instruction->opcode()));
}
// TODO(b/35950897): many of the stl functions used in the handlers are not
@@ -2654,9 +2654,8 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
return Unimplemented(
"Implicit broadcasting is currently unsupported in HLO evaluator "
"Shape Mismatch: %s vs %s vs %s: ",
- ShapeUtil::HumanString(shape).c_str(),
- ShapeUtil::HumanString(lhs->shape()).c_str(),
- ShapeUtil::HumanString(rhs->shape()).c_str());
+ ShapeUtil::HumanString(shape), ShapeUtil::HumanString(lhs->shape()),
+ ShapeUtil::HumanString(rhs->shape()));
}
const Literal& lhs_literal = parent_->GetEvaluatedLiteralFor(lhs);
@@ -2690,10 +2689,9 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
return Unimplemented(
"Implicit broadcasting is currently unsupported in HLO evaluator "
"Shape Mismatch: %s vs %s vs %s vs %s: ",
- ShapeUtil::HumanString(shape).c_str(),
- ShapeUtil::HumanString(lhs->shape()).c_str(),
- ShapeUtil::HumanString(rhs->shape()).c_str(),
- ShapeUtil::HumanString(ehs->shape()).c_str());
+ ShapeUtil::HumanString(shape), ShapeUtil::HumanString(lhs->shape()),
+ ShapeUtil::HumanString(rhs->shape()),
+ ShapeUtil::HumanString(ehs->shape()));
}
const Literal& lhs_literal = parent_->GetEvaluatedLiteralFor(lhs);
diff --git a/tensorflow/compiler/xla/service/hlo_graph_dumper.cc b/tensorflow/compiler/xla/service/hlo_graph_dumper.cc
index 59c628e945..6cf7730fdc 100644
--- a/tensorflow/compiler/xla/service/hlo_graph_dumper.cc
+++ b/tensorflow/compiler/xla/service/hlo_graph_dumper.cc
@@ -28,6 +28,7 @@ limitations under the License.
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
+#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "absl/types/optional.h"
@@ -44,7 +45,6 @@ limitations under the License.
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/numbers.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/regexp.h"
@@ -57,32 +57,12 @@ using absl::nullopt;
using absl::optional;
using absl::StrAppend;
using absl::StrCat;
+using absl::StrFormat;
using absl::StrJoin;
using tensorflow::Env;
using tensorflow::WriteStringToFile;
using tensorflow::io::JoinPath;
-// Helpers for Printf and Appendf.
-template <typename T>
-struct PrintfConvert {
- const T& operator()(const T& t) const { return t; }
-};
-template <>
-struct PrintfConvert<string> {
- const char* operator()(const string& s) const { return s.c_str(); }
-};
-
-// Like tensorflow::strings::Printf/Appendf, but you don't need to call c_str()
-// on strings.
-template <typename... Ts>
-string Printf(const char* fmt, const Ts&... ts) {
- return tensorflow::strings::Printf(fmt, PrintfConvert<Ts>()(ts)...);
-}
-template <typename... Ts>
-void Appendf(string* s, const char* fmt, const Ts&... ts) {
- tensorflow::strings::Appendf(s, fmt, PrintfConvert<Ts>()(ts)...);
-}
-
// Used to indicate how we should treat a given HLOInstruction in the graph.
// should we treat it like normal, hide it, and so on?
enum NodeFilterResult {
@@ -210,10 +190,9 @@ NodeColors NodeColorsForScheme(ColorScheme color) {
string NodeColorAttributes(ColorScheme color) {
NodeColors node_colors = NodeColorsForScheme(color);
- return Printf(
- R"(style="%s", fontcolor="%s", color="%s", fillcolor="%s")",
- node_colors.style, node_colors.font_color, node_colors.stroke_color,
- node_colors.fill_color);
+ return StrFormat(R"(style="%s", fontcolor="%s", color="%s", fillcolor="%s")",
+ node_colors.style, node_colors.font_color,
+ node_colors.stroke_color, node_colors.fill_color);
}
// Replaces <> with &lt;&gt;, so that this string is safe(er) for use in a
@@ -448,7 +427,7 @@ string HloDotDumper::Dump() {
}
string HloDotDumper::Header() {
- const char* fmt = R"(digraph G {
+ constexpr char fmt[] = R"(digraph G {
rankdir = TB;
compound = true;
label = <<b>%s</b>>;
@@ -481,8 +460,8 @@ stylesheet=<
}
if (profile_ != nullptr) {
auto cycles = profile_->total_cycles_executed(*computation_);
- Appendf(&graph_label, "<br/>total cycles = %lld (%s)", cycles,
- tensorflow::strings::HumanReadableNum(cycles));
+ absl::StrAppendFormat(&graph_label, "<br/>total cycles = %d (%s)", cycles,
+ tensorflow::strings::HumanReadableNum(cycles));
}
// Create CSS rules that say, when you hover over the given node or cluster,
@@ -509,14 +488,14 @@ stylesheet=<
// One could imagine other ways of writing this CSS rule that involve
// less duplication, but this way seems to be relatively performant.
edge_css_rules.push_back(
- Printf(" #%s%d:hover ~ #edge%lld text { fill: %s; }\n"
- " #%s%d:hover ~ #edge%lld path { "
- "stroke: %s; stroke-width: .2em; }\n"
- " #%s%d:hover ~ #edge%lld polygon { "
- "fill: %s; stroke: %s; stroke-width: .2em; }\n",
- elem_type, elem_id, edge_id, color, //
- elem_type, elem_id, edge_id, color, //
- elem_type, elem_id, edge_id, color, color));
+ StrFormat(" #%s%d:hover ~ #edge%d text { fill: %s; }\n"
+ " #%s%d:hover ~ #edge%d path { "
+ "stroke: %s; stroke-width: .2em; }\n"
+ " #%s%d:hover ~ #edge%d polygon { "
+ "fill: %s; stroke: %s; stroke-width: .2em; }\n",
+ elem_type, elem_id, edge_id, color, //
+ elem_type, elem_id, edge_id, color, //
+ elem_type, elem_id, edge_id, color, color));
};
// The "to_node" value may be a NULL, indicating that this points to the
@@ -559,7 +538,7 @@ stylesheet=<
}
}
- return Printf(fmt, graph_label, StrJoin(edge_css_rules, "\n"));
+ return StrFormat(fmt, graph_label, StrJoin(edge_css_rules, "\n"));
}
string HloDotDumper::Footer() { return StrCat(StrJoin(edges_, "\n"), "\n}"); }
@@ -600,9 +579,9 @@ string HloDotDumper::DumpSubcomputation(const HloComputation* subcomp,
VLOG(2) << "Edge: from " << from->name() << " to " << parent_instr->name()
<< " as " << next_edge_id_;
edge_ids_.insert({{from, parent_instr}, next_edge_id_++});
- const char* edge_fmt =
+ constexpr char edge_fmt[] =
R"(%s -> %s [ltail="%s", style="dashed" tooltip="%s -> %s"];)";
- edges_.push_back(Printf(
+ edges_.push_back(StrFormat(
edge_fmt, InstructionId(from), InstructionId(parent_instr),
SubcomputationId(subcomp), subcomp->name(), parent_instr->name()));
}
@@ -619,9 +598,10 @@ string HloDotDumper::DumpSubcomputation(const HloComputation* subcomp,
string subcomp_label, style;
if (parent_instr->opcode() == HloOpcode::kFusion) {
- subcomp_label = Printf("Fused expression for <b>%s</b><br/>%s",
- HtmlLikeStringSanitize(parent_instr->name()),
- HtmlLikeStringSanitize(parent_instr->ToCategory()));
+ subcomp_label =
+ StrFormat("Fused expression for <b>%s</b><br/>%s",
+ HtmlLikeStringSanitize(parent_instr->name()),
+ HtmlLikeStringSanitize(parent_instr->ToCategory()));
string extra_info = GetInstructionNodeExtraInfo(parent_instr);
if (!extra_info.empty()) {
StrAppend(&subcomp_label, "<br/>", extra_info);
@@ -647,18 +627,18 @@ string HloDotDumper::DumpSubcomputation(const HloComputation* subcomp,
strokecolor = highlight ? "#b71c1c" : "#c2c2c2";
}
style =
- Printf(R"(style="rounded,filled,bold"; fillcolor="%s"; color="%s;")",
- fillcolor, strokecolor);
+ StrFormat(R"(style="rounded,filled,bold"; fillcolor="%s"; color="%s;")",
+ fillcolor, strokecolor);
} else {
- subcomp_label = Printf("Subcomputation for <b>%s</b><br/>%s",
- HtmlLikeStringSanitize(parent_instr->name()),
- HtmlLikeStringSanitize(subcomp->name()));
+ subcomp_label = StrFormat("Subcomputation for <b>%s</b><br/>%s",
+ HtmlLikeStringSanitize(parent_instr->name()),
+ HtmlLikeStringSanitize(subcomp->name()));
style = "style=rounded; color=black;";
}
string comp_body = DumpComputation(subcomp);
- const char* computation_fmt = R"(subgraph %s {
+ constexpr char computation_fmt[] = R"(subgraph %s {
%s
label = <%s>;
labelloc = t;
@@ -667,7 +647,7 @@ tooltip = " ";
} // %s
)";
- return Printf(computation_fmt, id, style, subcomp_label, comp_body, id);
+ return StrFormat(computation_fmt, id, style, subcomp_label, comp_body, id);
}
string HloDotDumper::DumpComputation(const HloComputation* comp) {
@@ -718,11 +698,11 @@ string HloDotDumper::DumpRootTag() {
VLOG(2) << "Adding edge from " << from->name() << " to root tag as "
<< next_edge_id_;
edge_ids_.insert({{from, to}, next_edge_id_++});
- edges_.push_back(Printf(R"(%s -> %s [tooltip=" "];)", from_id, to_id));
+ edges_.push_back(StrFormat(R"(%s -> %s [tooltip=" "];)", from_id, to_id));
- return Printf(R"(%s [label=<%s>, shape=%s, tooltip=" ", %s];)"
- "\n",
- to_id, node_body, node_shape, NodeColorAttributes(color));
+ return StrFormat(R"(%s [label=<%s>, shape=%s, tooltip=" ", %s];)"
+ "\n",
+ to_id, node_body, node_shape, NodeColorAttributes(color));
}
static const HloConstantInstruction* TryGetFusionParameterConstant(
@@ -817,10 +797,10 @@ string HloDotDumper::DumpInstruction(const HloInstruction* instr) {
}
}
- return Printf(R"(%s [label=<%s>, shape=%s, tooltip="%s", %s];)"
- "\n",
- InstructionId(instr), node_body, node_shape, node_metadata,
- NodeColorAttributes(color));
+ return StrFormat(R"(%s [label=<%s>, shape=%s, tooltip="%s", %s];)"
+ "\n",
+ InstructionId(instr), node_body, node_shape, node_metadata,
+ NodeColorAttributes(color));
}
string HloDotDumper::GetInstructionNodeInlinedOperands(
@@ -833,7 +813,7 @@ string HloDotDumper::GetInstructionNodeInlinedOperands(
// enumerates all of its empty dimensions (e.g. "{ { {}, {} }, ..."), which
// is just noise.
if (ShapeUtil::IsZeroElementArray(shape)) {
- return Printf("{} (%s)", ShapeUtil::HumanString(constant->shape()));
+ return StrFormat("{} (%s)", ShapeUtil::HumanString(constant->shape()));
}
// Print the literal value of constants with <= K elements.
@@ -848,8 +828,8 @@ string HloDotDumper::GetInstructionNodeInlinedOperands(
// collected from profiling tools. Those constants may not have a valid
// literal.
if (elem_count.has_value() && *elem_count <= 8 && constant->HasLiteral()) {
- return Printf("%s (%s)", constant->literal().ToString(),
- ShapeUtil::HumanString(constant->shape()));
+ return StrFormat("%s (%s)", constant->literal().ToString(),
+ ShapeUtil::HumanString(constant->shape()));
}
// Otherwise, print e.g. "%constant.42 (s32[100])".
@@ -859,8 +839,8 @@ string HloDotDumper::GetInstructionNodeInlinedOperands(
} else {
constant_name = StrCat("constant ", constant->name());
}
- return Printf("%s %s", constant_name,
- ShapeUtil::HumanString(constant->shape()));
+ return StrFormat("%s %s", constant_name,
+ ShapeUtil::HumanString(constant->shape()));
};
std::vector<string> lines;
@@ -881,7 +861,7 @@ string HloDotDumper::GetInstructionNodeInlinedOperands(
TryGetFusionParameterConstant(operand)) {
operand_str = stringify_constant(constant);
} else {
- operand_str = Printf("Parameter %lld", operand->parameter_number());
+ operand_str = StrFormat("Parameter %d", operand->parameter_number());
}
} else {
operand_str = operand->name();
@@ -890,9 +870,9 @@ string HloDotDumper::GetInstructionNodeInlinedOperands(
if (operand_str) {
if (instr->operand_count() > 1) {
- lines.push_back(Printf("<b>operand %lld</b> = %s", i, *operand_str));
+ lines.push_back(StrFormat("<b>operand %d</b> = %s", i, *operand_str));
} else {
- lines.push_back(Printf("<b>operand</b> = %s", *operand_str));
+ lines.push_back(StrFormat("<b>operand</b> = %s", *operand_str));
}
}
}
@@ -1079,13 +1059,13 @@ string HloDotDumper::GetInstructionNodeShape(const HloInstruction* instr) {
string HloDotDumper::GetInstructionNodeLabel(const HloInstruction* instr) {
// If we have a parameter, put the param number in the name.
if (instr->opcode() == HloOpcode::kParameter) {
- return Printf("<b>Parameter %lld</b>", instr->parameter_number());
+ return StrFormat("<b>Parameter %d</b>", instr->parameter_number());
}
// The HLO instruction name contains usually the opcode, e.g. "%add.42" is
// an add instruction. In this case we render just the name.
if (absl::StartsWith(instr->name(), HloOpcodeString(instr->opcode()))) {
- return Printf("<b>%s</b>", HtmlLikeStringSanitize(instr->name()));
+ return StrFormat("<b>%s</b>", HtmlLikeStringSanitize(instr->name()));
}
string extended_opcode =
StrCat(HloOpcodeString(instr->opcode()),
@@ -1093,8 +1073,8 @@ string HloDotDumper::GetInstructionNodeLabel(const HloInstruction* instr) {
? ""
: StrCat(":", xla::ToString(instr->fusion_kind())));
// If the name does not contain the opcode, render both.
- return Printf("<b>%s</b><br/>%s", HtmlLikeStringSanitize(extended_opcode),
- HtmlLikeStringSanitize(instr->name()));
+ return StrFormat("<b>%s</b><br/>%s", HtmlLikeStringSanitize(extended_opcode),
+ HtmlLikeStringSanitize(instr->name()));
}
string HloDotDumper::GetInstructionNodeMetadata(const HloInstruction* instr) {
@@ -1103,13 +1083,13 @@ string HloDotDumper::GetInstructionNodeMetadata(const HloInstruction* instr) {
lines.push_back(HtmlLikeStringSanitize(instr->metadata().op_name()));
}
if (!instr->metadata().op_type().empty()) {
- lines.push_back(Printf(
+ lines.push_back(StrFormat(
"op_type: %s", HtmlLikeStringSanitize(instr->metadata().op_type())));
}
if (!instr->metadata().source_file().empty() &&
instr->metadata().source_line() != 0) {
- lines.push_back(Printf("op_type: %s", instr->metadata().source_file(),
- instr->metadata().source_line()));
+ lines.push_back(StrFormat("op_type: %s:%d", instr->metadata().source_file(),
+ instr->metadata().source_line()));
}
return StrJoin(lines, "<br/>");
@@ -1164,7 +1144,7 @@ string HloDotDumper::GetInstructionNodeExtraInfo(const HloInstruction* instr) {
lines.push_back(instr_shape);
}
if (debug_options_.xla_hlo_graph_addresses()) {
- lines.push_back(Printf("[%p]", instr));
+ lines.push_back(StrFormat("[%p]", instr));
}
if (profile_ != nullptr) {
double hlo_cycles_executed = profile_->GetCyclesTakenBy(*instr);
@@ -1172,8 +1152,8 @@ string HloDotDumper::GetInstructionNodeExtraInfo(const HloInstruction* instr) {
profile_->total_cycles_executed(*instr->parent());
if (hlo_cycles_executed > 0 && total_cycles_executed > 0) {
lines.push_back(
- Printf("%% of cycles executed=%.2f",
- 100 * hlo_cycles_executed / total_cycles_executed));
+ StrFormat("%% of cycles executed=%.2f",
+ 100 * hlo_cycles_executed / total_cycles_executed));
}
}
return StrJoin(lines, "<br/>");
@@ -1208,7 +1188,8 @@ void HloDotDumper::AddInstructionIncomingEdges(const HloInstruction* instr) {
string edge_label;
if (instr->operand_count() > 1 && !control_edge) {
- edge_label = Printf(R"( headlabel="%lld", labeldistance=2)", operand_num);
+ edge_label =
+ StrFormat(R"( headlabel="%d", labeldistance=2)", operand_num);
} else if (control_edge) {
edge_label = "style=\"dotted\" color=\"gray\" label=\"ctrl\"";
}
@@ -1218,10 +1199,11 @@ void HloDotDumper::AddInstructionIncomingEdges(const HloInstruction* instr) {
// means.
bool is_big_array = TotalElementsInShape(from->shape()) >= 4096;
- const char* kEdgeFmt = R"(%s -> %s [arrowhead=%s tooltip="%s -> %s" %s];)";
- edges_.push_back(Printf(kEdgeFmt, InstructionId(from), InstructionId(to),
- (is_big_array ? "normal" : "empty"), from->name(),
- to->name(), edge_label));
+ constexpr char kEdgeFmt[] =
+ R"(%s -> %s [arrowhead=%s tooltip="%s -> %s" %s];)";
+ edges_.push_back(StrFormat(kEdgeFmt, InstructionId(from), InstructionId(to),
+ (is_big_array ? "normal" : "empty"),
+ from->name(), to->name(), edge_label));
};
// Add edges from instr's operands to instr. Parameters within fusion
@@ -1262,11 +1244,11 @@ string HloDotDumper::GetInstructionTrivialComputationStr(
continue;
}
if (instr->called_computations().size() == 1) {
- lines.push_back(Printf("Subcomputation: <b>%s</b>",
- HtmlLikeStringSanitize(*computation_type)));
+ lines.push_back(StrFormat("Subcomputation: <b>%s</b>",
+ HtmlLikeStringSanitize(*computation_type)));
} else {
- lines.push_back(Printf("Subcomputation %lld: <b>%s</b>", i,
- HtmlLikeStringSanitize(*computation_type)));
+ lines.push_back(StrFormat("Subcomputation %d: <b>%s</b>", i,
+ HtmlLikeStringSanitize(*computation_type)));
}
}
return StrJoin(lines, "<br/>");
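Two details from the hlo_graph_dumper changes are worth noting: absl::StrAppendFormat is the drop-in for strings::Appendf, and literal format strings are checked against the argument types at compile time, which presumably motivates the const char* to constexpr char[] changes to the format variables. A minimal sketch of the append form, with illustrative names:

#include <cstdint>
#include <string>

#include "absl/strings/str_format.h"

std::string CyclesLabel(int64_t cycles, double fraction) {
  std::string label = "<b>cycles</b>";
  // "%%" still emits a literal percent sign; %.2f works as with Printf.
  absl::StrAppendFormat(&label, "<br/>total cycles = %d (%.2f%% of module)",
                        cycles, 100 * fraction);
  return label;
}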
diff --git a/tensorflow/compiler/xla/service/hlo_instruction.cc b/tensorflow/compiler/xla/service/hlo_instruction.cc
index 3e077d8aec..6b4f3c4eb8 100644
--- a/tensorflow/compiler/xla/service/hlo_instruction.cc
+++ b/tensorflow/compiler/xla/service/hlo_instruction.cc
@@ -2381,7 +2381,7 @@ Status HloInstruction::Visit(DfsHloVisitorBase<HloInstructionPtr>* visitor) {
return InternalError(
"Unhandled HloOpcode for DfsHloVisitor: %s. This should not happen - "
"please file a bug for XLA.",
- HloOpcodeString(opcode_).c_str());
+ HloOpcodeString(opcode_));
}
// Explicit instantiations.
@@ -2464,7 +2464,7 @@ static Status PostOrderDFS(HloInstruction* root, Visitor* visitor,
if (!TF_PREDICT_TRUE(PushDFSChild(visitor, &dfs_stack, child))) {
return FailedPrecondition(
"A cycle is detected while visiting instruction %s",
- current_node->ToString().c_str());
+ current_node->ToString());
}
}
@@ -2473,7 +2473,7 @@ static Status PostOrderDFS(HloInstruction* root, Visitor* visitor,
if (!TF_PREDICT_TRUE(PushDFSChild(visitor, &dfs_stack, child))) {
return FailedPrecondition(
"A cycle is detected while visiting instruction %s",
- current_node->ToString().c_str());
+ current_node->ToString());
}
}
}
@@ -2789,7 +2789,7 @@ StatusOr<HloInstruction::FusionKind> StringToFusionKind(
if (kind_name == "kCustom") {
return HloInstruction::FusionKind::kCustom;
}
- return InvalidArgument("Unknown fusion kind: %s", kind_name.c_str());
+ return InvalidArgument("Unknown fusion kind: %s", kind_name);
}
string PaddingConfigToString(const PaddingConfig& padding) {
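Call sites such as InvalidArgument, Unimplemented, and FailedPrecondition can drop .c_str() only because those helpers evidently take StrFormat-style format specs after this change. A simplified stand-in, not XLA's actual declarations, showing how such a wrapper is typically written with absl::FormatSpec so compile-time format checking is preserved:

#include <string>

#include "absl/strings/str_format.h"

// Hypothetical Status type standing in for the real one.
struct Status {
  std::string message;
};

template <typename... Args>
Status InvalidArgument(const absl::FormatSpec<Args...>& format,
                       const Args&... args) {
  return Status{absl::StrFormat(format, args...)};
}

// Usage: InvalidArgument("Unknown opcode: %s", opcode_name);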
diff --git a/tensorflow/compiler/xla/service/hlo_instruction_test.cc b/tensorflow/compiler/xla/service/hlo_instruction_test.cc
index 504b13043f..8b0b90dfb3 100644
--- a/tensorflow/compiler/xla/service/hlo_instruction_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_instruction_test.cc
@@ -53,7 +53,7 @@ class OpAndUserCollectingVisitor : public DfsHloVisitorWithDefault {
public:
Status DefaultAction(HloInstruction* hlo_instruction) override {
return Unimplemented("not implemented %s",
- HloOpcodeString(hlo_instruction->opcode()).c_str());
+ HloOpcodeString(hlo_instruction->opcode()));
}
Status HandleParameter(HloInstruction* parameter) override {
diff --git a/tensorflow/compiler/xla/service/hlo_lexer.cc b/tensorflow/compiler/xla/service/hlo_lexer.cc
index 0e49d343d6..5b23ee7d00 100644
--- a/tensorflow/compiler/xla/service/hlo_lexer.cc
+++ b/tensorflow/compiler/xla/service/hlo_lexer.cc
@@ -306,8 +306,7 @@ TokKind HloLexer::LexNumberOrPattern() {
R"([-]?((\d+|\d+[.]\d*|\d*[.]\d+)([eE][+-]?\d+))|[-]?(\d+[.]\d*|\d*[.]\d+))"};
if (RE2::Consume(&consumable, *float_pattern)) {
current_ptr_ = consumable.begin();
- CHECK(absl::SimpleAtod(string(token_start_, current_ptr_).c_str(),
- &decimal_val_));
+ CHECK(absl::SimpleAtod(string(token_start_, current_ptr_), &decimal_val_));
return TokKind::kDecimal;
}
diff --git a/tensorflow/compiler/xla/service/hlo_module_group_metadata.cc b/tensorflow/compiler/xla/service/hlo_module_group_metadata.cc
index f52a37bc74..a9c5d48983 100644
--- a/tensorflow/compiler/xla/service/hlo_module_group_metadata.cc
+++ b/tensorflow/compiler/xla/service/hlo_module_group_metadata.cc
@@ -163,7 +163,7 @@ Status HloModuleGroupMetadata::VerifyCompanionSets() const {
ss << " " << hlo->name() << std::endl;
}
ss << "has multiple instructions on the same device";
- return FailedPrecondition("%s", ss.str().c_str());
+ return FailedPrecondition("%s", ss.str());
}
}
}
@@ -411,16 +411,16 @@ Status HloModuleGroupMetadata::AddCompanion(HloInstruction* instruction1,
Status HloModuleGroupMetadata::VerifyChannelInstructions() {
for (const Channel& channel : channels_) {
if (channel.send == nullptr) {
- return FailedPrecondition("missing send for id : %lld", channel.id);
+ return FailedPrecondition("missing send for id : %d", channel.id);
}
if (channel.recv == nullptr) {
- return FailedPrecondition("missing recv for id : %lld", channel.id);
+ return FailedPrecondition("missing recv for id : %d", channel.id);
}
if (channel.send_done == nullptr) {
- return FailedPrecondition("missing send-done for id : %lld", channel.id);
+ return FailedPrecondition("missing send-done for id : %d", channel.id);
}
if (channel.recv_done == nullptr) {
- return FailedPrecondition("missing recv-done for id : %lld", channel.id);
+ return FailedPrecondition("missing recv-done for id : %d", channel.id);
}
}
@@ -436,33 +436,33 @@ Status HloModuleGroupMetadata::VerifyChannelInstructions() {
auto send_done_device = GetInstructionDevice(*channel.send_done);
if (!send_device) {
return FailedPrecondition("send instruction must have a device: %s",
- channel.send->ToString().c_str());
+ channel.send->ToString());
}
if (!send_done_device) {
return FailedPrecondition("send_done instruction must have a device: %s",
- channel.send_done->ToString().c_str());
+ channel.send_done->ToString());
}
if (*send_device != *send_done_device) {
return FailedPrecondition(
- "send and send-done (channel=%lld) must be on the same device: %lld "
- "vs. %lld",
+ "send and send-done (channel=%d) must be on the same device: %d "
+ "vs. %d",
channel.id, *send_device, *send_done_device);
}
auto recv_device = GetInstructionDevice(*channel.recv);
auto recv_done_device = GetInstructionDevice(*channel.recv_done);
if (!recv_done_device) {
return FailedPrecondition("recv_done instruction must have a device: %s",
- channel.recv_done->ToString().c_str());
+ channel.recv_done->ToString());
}
if (*recv_device != *recv_done_device) {
return FailedPrecondition(
- "recv and recv-done (channel=%lld) must be on the same device: %lld "
- "vs. %lld",
+ "recv and recv-done (channel=%d) must be on the same device: %d "
+ "vs. %d",
channel.id, *recv_device, *recv_done_device);
}
if (*send_device == *recv_device) {
return FailedPrecondition(
- "send and recv (channel=%lld) must be on different devices: %lld",
+ "send and recv (channel=%d) must be on different devices: %d",
channel.id, *send_device);
}
}
@@ -483,7 +483,7 @@ Status HloModuleGroupMetadata::VerifyChannelInstructions() {
!CheckCompanionPathsCompatibility(
path, GetCompanionsPath(channel.recv_done))) {
return FailedPrecondition(
- "Nest companion paths do not match for channel %lld", channel.id);
+ "Nest companion paths do not match for channel %d", channel.id);
}
}
return Status::OK();
diff --git a/tensorflow/compiler/xla/service/hlo_module_group_util.cc b/tensorflow/compiler/xla/service/hlo_module_group_util.cc
index b5c7681edd..d70328c8a3 100644
--- a/tensorflow/compiler/xla/service/hlo_module_group_util.cc
+++ b/tensorflow/compiler/xla/service/hlo_module_group_util.cc
@@ -282,7 +282,7 @@ Status HloModuleGroupUtil::VisitTopologicalOrder(
"following nodes. Note that the order of the nodes is arbitrary "
"and that the list may include nodes that are not part of the "
"cycle.\n%s",
- predecessor->ToString().c_str(), cyclic_instructions.c_str());
+ predecessor->ToString(), cyclic_instructions);
}
stack.push(predecessor);
}
diff --git a/tensorflow/compiler/xla/service/hlo_opcode.cc b/tensorflow/compiler/xla/service/hlo_opcode.cc
index d1eaf35785..2d4e38589f 100644
--- a/tensorflow/compiler/xla/service/hlo_opcode.cc
+++ b/tensorflow/compiler/xla/service/hlo_opcode.cc
@@ -39,7 +39,7 @@ StatusOr<HloOpcode> StringToHloOpcode(const string& opcode_name) {
});
auto it = opcode_map->find(opcode_name);
if (it == opcode_map->end()) {
- return InvalidArgument("Unknown opcode: %s", opcode_name.c_str());
+ return InvalidArgument("Unknown opcode: %s", opcode_name);
}
return it->second;
}
diff --git a/tensorflow/compiler/xla/service/hlo_ordering.cc b/tensorflow/compiler/xla/service/hlo_ordering.cc
index 8fe91c7278..0581d5c404 100644
--- a/tensorflow/compiler/xla/service/hlo_ordering.cc
+++ b/tensorflow/compiler/xla/service/hlo_ordering.cc
@@ -18,6 +18,7 @@ limitations under the License.
#include <utility>
#include <vector>
+#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/shape_util.h"
@@ -26,7 +27,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/core/errors.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"
namespace xla {
@@ -306,17 +306,15 @@ string PredecessorHloOrdering::ToStringHelper(const string& name) const {
std::vector<string> pieces;
pieces.push_back(name);
for (auto* computation : module_->MakeNonfusionComputations()) {
- pieces.push_back(tensorflow::strings::Printf("computation %s:",
- computation->name().c_str()));
+ pieces.push_back(absl::StrFormat("computation %s:", computation->name()));
const auto all = computation->MakeInstructionPostOrder();
for (auto instruction : all) {
- pieces.push_back(tensorflow::strings::Printf(
- " %s predecessors:", instruction->name().c_str()));
+ pieces.push_back(
+ absl::StrFormat(" %s predecessors:", instruction->name()));
for (auto predecessor : all) {
if (predecessors_.at(computation)
->IsReachable(predecessor, instruction)) {
- pieces.push_back(
- tensorflow::strings::Printf(" %s", predecessor->name().c_str()));
+ pieces.push_back(absl::StrFormat(" %s", predecessor->name()));
}
}
}
@@ -372,8 +370,8 @@ string SequentialHloOrdering::ToString() const {
std::vector<string> pieces;
pieces.push_back("SequentialHloOrdering");
for (auto* computation : module_->computations()) {
- pieces.push_back(tensorflow::strings::Printf("computation %s order:",
- computation->name().c_str()));
+ pieces.push_back(
+ absl::StrFormat("computation %s order:", computation->name()));
// Gather all instructions in the module sequence for this computation and
// sort them by their position.
std::vector<const HloInstruction*> instructions;
@@ -388,8 +386,7 @@ string SequentialHloOrdering::ToString() const {
return order_position_.at(a) < order_position_.at(b);
});
for (auto instruction : instructions) {
- pieces.push_back(
- tensorflow::strings::Printf(" %s", instruction->name().c_str()));
+ pieces.push_back(absl::StrFormat(" %s", instruction->name()));
}
}
return absl::StrJoin(pieces, "\n");
diff --git a/tensorflow/compiler/xla/service/hlo_parser.cc b/tensorflow/compiler/xla/service/hlo_parser.cc
index df789e6222..ba0f07dd14 100644
--- a/tensorflow/compiler/xla/service/hlo_parser.cc
+++ b/tensorflow/compiler/xla/service/hlo_parser.cc
@@ -18,6 +18,7 @@ limitations under the License.
#include "absl/algorithm/container.h"
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
+#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "tensorflow/compiler/xla/literal.h"
@@ -29,7 +30,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/gtl/map_util.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
namespace xla {
@@ -39,8 +39,8 @@ using absl::nullopt;
using absl::optional;
using absl::StrAppend;
using absl::StrCat;
+using absl::StrFormat;
using absl::StrJoin;
-using ::tensorflow::strings::Printf;
const double kF16max = 65504;
@@ -1586,8 +1586,7 @@ bool HloParser::ParseInstructionNames(
}
std::pair<HloInstruction*, LocTy>* instr = FindInstruction(name);
if (!instr) {
- return TokenError(
- Printf("instruction '%s' is not defined", name.c_str()));
+ return TokenError(StrFormat("instruction '%s' is not defined", name));
}
instructions->push_back(instr->first);
} while (EatIfPresent(TokKind::kComma));
@@ -1829,17 +1828,17 @@ bool HloParser::ParseDenseLiteral(std::unique_ptr<Literal>* literal,
case TokKind::kLbrace: {
nest_level++;
if (nest_level > rank) {
- return TokenError(Printf(
- "expects nested array in rank %lld, but sees larger", rank));
+ return TokenError(absl::StrFormat(
+ "expects nested array in rank %d, but sees larger", rank));
}
if (nest_level > 1) {
elems_seen_per_dim[nest_level - 2]++;
if (elems_seen_per_dim[nest_level - 2] >
shape.dimensions(nest_level - 2)) {
- return TokenError(Printf(
- "expects %lld elements in the %sth element, but sees more",
+ return TokenError(absl::StrFormat(
+ "expects %d elements in the %sth element, but sees more",
shape.dimensions(nest_level - 2),
- get_index_str(nest_level - 2).c_str()));
+ get_index_str(nest_level - 2)));
}
}
lexer_.Lex();
@@ -1848,9 +1847,9 @@ bool HloParser::ParseDenseLiteral(std::unique_ptr<Literal>* literal,
case TokKind::kRbrace: {
nest_level--;
if (elems_seen_per_dim[nest_level] != shape.dimensions(nest_level)) {
- return TokenError(Printf(
- "expects %lld elements in the %sth element, but sees %lld",
- shape.dimensions(nest_level), get_index_str(nest_level).c_str(),
+ return TokenError(absl::StrFormat(
+ "expects %d elements in the %sth element, but sees %d",
+ shape.dimensions(nest_level), get_index_str(nest_level),
elems_seen_per_dim[nest_level]));
}
elems_seen_per_dim[nest_level] = 0;
@@ -1871,15 +1870,15 @@ bool HloParser::ParseDenseLiteral(std::unique_ptr<Literal>* literal,
if (rank > 0) {
if (nest_level != rank) {
return TokenError(
- Printf("expects nested array in rank %lld, but sees %lld", rank,
- nest_level));
+ absl::StrFormat("expects nested array in rank %d, but sees %d",
+ rank, nest_level));
}
elems_seen_per_dim[rank - 1]++;
if (elems_seen_per_dim[rank - 1] > shape.dimensions(rank - 1)) {
- return TokenError(
- Printf("expects %lld elements on the minor-most dimension, but "
- "sees more",
- shape.dimensions(rank - 1)));
+ return TokenError(absl::StrFormat(
+ "expects %d elements on the minor-most dimension, but "
+ "sees more",
+ shape.dimensions(rank - 1)));
}
}
if (lexer_.GetKind() == TokKind::kw_true ||
@@ -2135,8 +2134,8 @@ bool HloParser::ParseSubAttributes(
for (const auto& attr_it : attrs) {
if (attr_it.second.required &&
seen_attrs.find(attr_it.first) == seen_attrs.end()) {
- return Error(loc, Printf("sub-attribute %s is expected but not seen",
- attr_it.first.c_str()));
+ return Error(loc, StrFormat("sub-attribute %s is expected but not seen",
+ attr_it.first));
}
}
return ParseToken(TokKind::kRbrace, "expects '}' to end sub attributes");
@@ -2156,8 +2155,8 @@ bool HloParser::ParseAttributes(
for (const auto& attr_it : attrs) {
if (attr_it.second.required &&
seen_attrs.find(attr_it.first) == seen_attrs.end()) {
- return Error(loc, Printf("attribute %s is expected but not seen",
- attr_it.first.c_str()));
+ return Error(loc, StrFormat("attribute %s is expected but not seen",
+ attr_it.first));
}
}
return true;
@@ -2173,7 +2172,7 @@ bool HloParser::ParseAttributeHelper(
}
VLOG(1) << "Parsing attribute " << name;
if (!seen_attrs->insert(name).second) {
- return Error(loc, Printf("attribute %s already exists", name.c_str()));
+ return Error(loc, StrFormat("attribute %s already exists", name));
}
auto attr_it = attrs.find(name);
if (attr_it == attrs.end()) {
@@ -2188,8 +2187,8 @@ bool HloParser::ParseAttributeHelper(
StrAppend(out, kv.first);
}));
}
- return Error(loc, Printf("unexpected attribute \"%s\". %s", name.c_str(),
- allowed_attrs.c_str()));
+ return Error(loc, StrFormat("unexpected attribute \"%s\". %s", name,
+ allowed_attrs));
}
AttrTy attr_type = attr_it->second.attr_type;
void* attr_out_ptr = attr_it->second.result;
@@ -2384,7 +2383,7 @@ bool HloParser::ParseAttributeHelper(
}
}();
if (!success) {
- return Error(loc, Printf("error parsing attribute %s", name.c_str()));
+ return Error(loc, StrFormat("error parsing attribute %s", name));
}
return true;
}
@@ -2548,7 +2547,7 @@ bool HloParser::ParseConvolutionDimensionNumbers(
dnums->set_input_spatial_dimensions(c - '0', i);
} else {
return TokenError(
- Printf("expects [0-%lldbf] in lhs dimension numbers", rank - 1));
+ StrFormat("expects [0-%dbf] in lhs dimension numbers", rank - 1));
}
}
}
@@ -2571,7 +2570,7 @@ bool HloParser::ParseConvolutionDimensionNumbers(
dnums->set_kernel_spatial_dimensions(c - '0', i);
} else {
return TokenError(
- Printf("expects [0-%lldio] in rhs dimension numbers", rank - 1));
+ StrFormat("expects [0-%dio] in rhs dimension numbers", rank - 1));
}
}
}
@@ -2593,8 +2592,8 @@ bool HloParser::ParseConvolutionDimensionNumbers(
} else if (c < '0' + rank && c >= '0') {
dnums->set_output_spatial_dimensions(c - '0', i);
} else {
- return TokenError(
- Printf("expects [0-%lldbf] in output dimension numbers", rank - 1));
+ return TokenError(StrFormat(
+ "expects [0-%dbf] in output dimension numbers", rank - 1));
}
}
}
@@ -2640,9 +2639,10 @@ bool HloParser::ParseSliceRanges(SliceRanges* result) {
}
const auto& range = ranges.back();
if (range.size() != 2 && range.size() != 3) {
- return Error(loc, Printf("expects [start:limit:step] or [start:limit], "
- "but sees %ld elements.",
- range.size()));
+ return Error(loc,
+ StrFormat("expects [start:limit:step] or [start:limit], "
+ "but sees %d elements.",
+ range.size()));
}
} while (EatIfPresent(TokKind::kComma));
@@ -2828,14 +2828,13 @@ bool HloParser::ParseDxD(const string& name,
std::vector<tensorflow::int64>* result) {
LocTy loc = lexer_.GetLoc();
if (!result->empty()) {
- return Error(loc,
- Printf("sub-attribute '%s=' already exists", name.c_str()));
+ return Error(loc, StrFormat("sub-attribute '%s=' already exists", name));
}
// 1D
if (lexer_.GetKind() == TokKind::kInt) {
tensorflow::int64 number;
if (!ParseInt64(&number)) {
- return Error(loc, Printf("expects sub-attribute '%s=i'", name.c_str()));
+ return Error(loc, StrFormat("expects sub-attribute '%s=i'", name));
}
result->push_back(number);
return true;
@@ -2844,8 +2843,7 @@ bool HloParser::ParseDxD(const string& name,
if (lexer_.GetKind() == TokKind::kDxD) {
string str = lexer_.GetStrVal();
if (!SplitToInt64s(str, 'x', result)) {
- return Error(loc,
- Printf("expects sub-attribute '%s=ixj...'", name.c_str()));
+ return Error(loc, StrFormat("expects sub-attribute '%s=ixj...'", name));
}
lexer_.Lex();
return true;
@@ -2940,9 +2938,8 @@ bool HloParser::ParseOpcode(HloOpcode* result) {
string val = lexer_.GetStrVal();
auto status_or_result = StringToHloOpcode(val);
if (!status_or_result.ok()) {
- return TokenError(
- Printf("expects opcode but sees: %s, error: %s", val.c_str(),
- status_or_result.status().error_message().c_str()));
+ return TokenError(StrFormat("expects opcode but sees: %s, error: %s", val,
+ status_or_result.status().error_message()));
}
*result = status_or_result.ValueOrDie();
lexer_.Lex();
@@ -2956,7 +2953,7 @@ bool HloParser::ParseFftType(FftType* result) {
}
string val = lexer_.GetStrVal();
if (!FftType_Parse(val, result) || !FftType_IsValid(*result)) {
- return TokenError(Printf("expects fft type but sees: %s", val.c_str()));
+ return TokenError(StrFormat("expects fft type but sees: %s", val));
}
lexer_.Lex();
return true;
@@ -2970,9 +2967,9 @@ bool HloParser::ParseFusionKind(HloInstruction::FusionKind* result) {
string val = lexer_.GetStrVal();
auto status_or_result = StringToFusionKind(val);
if (!status_or_result.ok()) {
- return TokenError(
- Printf("expects fusion kind but sees: %s, error: %s", val.c_str(),
- status_or_result.status().error_message().c_str()));
+ return TokenError(StrFormat("expects fusion kind but sees: %s, error: %s",
+ val,
+ status_or_result.status().error_message()));
}
*result = status_or_result.ValueOrDie();
lexer_.Lex();
@@ -2988,8 +2985,8 @@ bool HloParser::ParseRandomDistribution(RandomDistribution* result) {
auto status_or_result = StringToRandomDistribution(val);
if (!status_or_result.ok()) {
return TokenError(
- Printf("expects random distribution but sees: %s, error: %s",
- val.c_str(), status_or_result.status().error_message().c_str()));
+ StrFormat("expects random distribution but sees: %s, error: %s", val,
+ status_or_result.status().error_message()));
}
*result = status_or_result.ValueOrDie();
lexer_.Lex();
@@ -3004,9 +3001,9 @@ bool HloParser::ParsePrecision(PrecisionConfigProto::Precision* result) {
string val = lexer_.GetStrVal();
auto status_or_result = StringToPrecision(val);
if (!status_or_result.ok()) {
- return TokenError(
- Printf("expects precision but sees: %s, error: %s", val.c_str(),
- status_or_result.status().error_message().c_str()));
+ return TokenError(StrFormat("expects precision but sees: %s, error: %s",
+ val,
+ status_or_result.status().error_message()));
}
*result = status_or_result.ValueOrDie();
lexer_.Lex();
@@ -3100,7 +3097,7 @@ StatusOr<HloSharding> HloParser::ParseShardingOnly() {
lexer_.Lex();
OpSharding op_sharding;
if (!ParseSharding(&op_sharding)) {
- return InvalidArgument("Syntax error:\n%s", GetError().c_str());
+ return InvalidArgument("Syntax error:\n%s", GetError());
}
if (lexer_.GetKind() != TokKind::kEof) {
return InvalidArgument("Syntax error:\nExtra content after sharding");
@@ -3112,7 +3109,7 @@ StatusOr<Window> HloParser::ParseWindowOnly() {
lexer_.Lex();
Window window;
if (!ParseWindow(&window, /*expect_outer_curlies=*/false)) {
- return InvalidArgument("Syntax error:\n%s", GetError().c_str());
+ return InvalidArgument("Syntax error:\n%s", GetError());
}
if (lexer_.GetKind() != TokKind::kEof) {
return InvalidArgument("Syntax error:\nExtra content after window");
@@ -3125,7 +3122,7 @@ HloParser::ParseConvolutionDimensionNumbersOnly() {
lexer_.Lex();
ConvolutionDimensionNumbers dnums;
if (!ParseConvolutionDimensionNumbers(&dnums)) {
- return InvalidArgument("Syntax error:\n%s", GetError().c_str());
+ return InvalidArgument("Syntax error:\n%s", GetError());
}
if (lexer_.GetKind() != TokKind::kEof) {
return InvalidArgument(
@@ -3163,7 +3160,7 @@ Status HloParser::ParseSingleInstruction(HloComputation::Builder* builder,
// Parse the instruction with the registered hook.
if (!ParseInstruction(builder, root_name)) {
- return InvalidArgument("Syntax error:\n%s", GetError().c_str());
+ return InvalidArgument("Syntax error:\n%s", GetError());
}
return Status::OK();
}
@@ -3174,7 +3171,7 @@ StatusOr<std::unique_ptr<HloModule>> ParseHloString(
absl::string_view str, const HloModuleConfig& config) {
HloParser parser(str, config);
if (!parser.Run()) {
- return InvalidArgument("Syntax error:\n%s", parser.GetError().c_str());
+ return InvalidArgument("Syntax error:\n%s", parser.GetError());
}
return parser.ConsumeHloModule();
}
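
The parser hunks above also replace length-qualified conversions such as %lld with plain %d. A small illustrative sketch (RankError is a hypothetical name): StrFormat deduces the width from the argument's type, so the same conversion character covers int, int64 and friends.

#include <cstdint>
#include <string>

#include "absl/strings/str_format.h"

std::string RankError(int64_t rank) {
  // %d is enough for an int64 argument; Printf would have needed %lld.
  return absl::StrFormat("expects nested array in rank %d, but sees larger",
                         rank);
}
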
diff --git a/tensorflow/compiler/xla/service/hlo_pass_pipeline.cc b/tensorflow/compiler/xla/service/hlo_pass_pipeline.cc
index df99e131d8..de7ad6d209 100644
--- a/tensorflow/compiler/xla/service/hlo_pass_pipeline.cc
+++ b/tensorflow/compiler/xla/service/hlo_pass_pipeline.cc
@@ -18,6 +18,7 @@ limitations under the License.
#include <functional>
#include "absl/strings/str_cat.h"
+#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "tensorflow/compiler/xla/service/hlo_graph_dumper.h"
#include "tensorflow/compiler/xla/service/hlo_proto_util.h"
@@ -48,9 +49,9 @@ void DumpModuleProto(const HloModule& module, const string& dump_to,
tensorflow::mutex_lock lock(mu);
const int64 pass_number = (*module_id_to_pass_number)[module.unique_id()]++;
- const string mod_name = SanitizeFileName(tensorflow::strings::Printf(
- "module_%04d.%04lld.%s.after_%s", module.unique_id(), pass_number,
- pipeline_name.c_str(), pass_name.c_str()));
+ const string mod_name = SanitizeFileName(
+ absl::StrFormat("module_%04d.%04d.%s.after_%s", module.unique_id(),
+ pass_number, pipeline_name, pass_name));
TF_QCHECK_OK(protobuf_util::DumpProtoToDirectory(MakeHloProto(module),
dump_to, mod_name));
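
Width and zero-fill flags carry over unchanged, as the module_%04d.%04d format above relies on; only the length modifier goes away. A hedged sketch (ModuleDumpName is a made-up name):

#include <cstdint>
#include <string>

#include "absl/strings/str_format.h"

std::string ModuleDumpName(int module_id, int64_t pass_number) {
  // %04d zero-pads to four digits for both the int and the int64 argument.
  return absl::StrFormat("module_%04d.%04d", module_id, pass_number);
}
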
diff --git a/tensorflow/compiler/xla/service/hlo_rematerialization.cc b/tensorflow/compiler/xla/service/hlo_rematerialization.cc
index 6c6e7c6fec..569d2e5d2d 100644
--- a/tensorflow/compiler/xla/service/hlo_rematerialization.cc
+++ b/tensorflow/compiler/xla/service/hlo_rematerialization.cc
@@ -22,6 +22,7 @@ limitations under the License.
#include "absl/container/inlined_vector.h"
#include "absl/strings/str_cat.h"
+#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "tensorflow/compiler/xla/map_util.h"
#include "tensorflow/compiler/xla/primitive_util.h"
@@ -40,7 +41,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/util.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"
namespace xla {
@@ -1353,12 +1353,11 @@ StatusOr<bool> HloRematerialization::Run(
XLA_VLOG_LINES(3, "After HloRematerialization:\n" + module->ToString());
if (current_peak_memory > memory_limit_bytes) {
- LOG(WARNING) << tensorflow::strings::Printf(
- "Can't reduce memory use below %s (%lld bytes) by rematerialization; "
- "only reduced to %s (%lld bytes)",
- HumanReadableNumBytes(memory_limit_bytes).c_str(), memory_limit_bytes,
- HumanReadableNumBytes(current_peak_memory).c_str(),
- current_peak_memory);
+ LOG(WARNING) << absl::StrFormat(
+ "Can't reduce memory use below %s (%d bytes) by rematerialization; "
+ "only reduced to %s (%d bytes)",
+ HumanReadableNumBytes(memory_limit_bytes), memory_limit_bytes,
+ HumanReadableNumBytes(current_peak_memory), current_peak_memory);
}
return changed;
diff --git a/tensorflow/compiler/xla/service/hlo_scheduling.cc b/tensorflow/compiler/xla/service/hlo_scheduling.cc
index 56b14f9fef..0fc3b268c0 100644
--- a/tensorflow/compiler/xla/service/hlo_scheduling.cc
+++ b/tensorflow/compiler/xla/service/hlo_scheduling.cc
@@ -30,7 +30,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/map_util.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/service/hlo_verifier.cc b/tensorflow/compiler/xla/service/hlo_verifier.cc
index f60c4eab42..81ffb5ac43 100644
--- a/tensorflow/compiler/xla/service/hlo_verifier.cc
+++ b/tensorflow/compiler/xla/service/hlo_verifier.cc
@@ -128,10 +128,9 @@ Status ShapeVerifier::CheckIsTokenOperand(const HloInstruction* instruction,
const HloInstruction* token = instruction->operand(operand_no);
if (!ShapeUtil::Equal(token->shape(), ShapeUtil::MakeTokenShape())) {
return InternalError(
- "Expected operand %lld to be token-shaped, actual shape is "
+ "Expected operand %d to be token-shaped, actual shape is "
"%s:\n%s",
- operand_no, StringifyShape(token->shape()).c_str(),
- instruction->ToString().c_str());
+ operand_no, StringifyShape(token->shape()), instruction->ToString());
}
return Status::OK();
}
@@ -144,9 +143,8 @@ Status ShapeVerifier::CheckOperandAndParameter(
computation->parameter_instruction(parameter_number);
if (!ShapesSame(operand->shape(), parameter->shape())) {
return InternalError("Operand %s shape does not match parameter's %s in %s",
- operand->ToString().c_str(),
- parameter->ToString().c_str(),
- instruction->ToString().c_str());
+ operand->ToString(), parameter->ToString(),
+ instruction->ToString());
}
return Status::OK();
}
@@ -171,9 +169,8 @@ Status ShapeVerifier::HandleOutfeed(HloInstruction* instruction) {
return InternalError(
"Expected outfeed shape to be equal to operand's shape %s, "
"actual shape is %s:\n%s",
- StringifyShape(outfeed->operand(0)->shape()).c_str(),
- StringifyShape(outfeed->outfeed_shape()).c_str(),
- outfeed->ToString().c_str());
+ StringifyShape(outfeed->operand(0)->shape()),
+ StringifyShape(outfeed->outfeed_shape()), outfeed->ToString());
}
return CheckShape(outfeed, ShapeUtil::MakeTokenShape());
}
@@ -191,7 +188,7 @@ bool ShapeVerifier::HasCompatibleElementTypes(const Shape& shape_0,
Status ShapeVerifier::HandleRng(HloInstruction* instruction) {
if (instruction->operand_count() != 2) {
return InternalError("Expected two operands for Rng instruction: %s",
- instruction->ToString().c_str());
+ instruction->ToString());
}
const Shape& shape_0 = instruction->operand(0)->shape();
@@ -199,14 +196,14 @@ Status ShapeVerifier::HandleRng(HloInstruction* instruction) {
if (!ShapeUtil::IsScalar(shape_0) || !ShapeUtil::IsScalar(shape_1)) {
return InternalError(
"Expected scalar types for the two operands of Rng instruction: %s",
- instruction->ToString().c_str());
+ instruction->ToString());
}
if (!HasCompatibleElementTypes(shape_0, shape_1, instruction->shape())) {
return InternalError(
"Expected compatible element types for the result and the two operands"
" of Rng instruction: %s",
- instruction->ToString().c_str());
+ instruction->ToString());
}
PrimitiveType element_type = shape_0.element_type();
@@ -219,7 +216,7 @@ Status ShapeVerifier::HandleRng(HloInstruction* instruction) {
"Element type not supported."
" Expected element to be of floating point type, integral type or"
" predicate type for RngUniform: %s",
- instruction->ToString().c_str());
+ instruction->ToString());
}
break;
@@ -228,13 +225,13 @@ Status ShapeVerifier::HandleRng(HloInstruction* instruction) {
return InternalError(
"Element type not supported."
" Expected element to be FloatingPointType for RngNormal: %s",
- instruction->ToString().c_str());
+ instruction->ToString());
}
break;
default:
return InternalError(
"Invalid Rng distribution %s",
- RandomDistribution_Name(instruction->random_distribution()).c_str());
+ RandomDistribution_Name(instruction->random_distribution()));
}
return Status::OK();
@@ -253,8 +250,8 @@ Status ShapeVerifier::HandleSort(HloInstruction* sort) {
return InternalError(
"Expected sort to have to have the same dimensions for the keys and "
"the values. Keys shape is: %s\n, Values shape is: %s",
- StringifyShape(sort->operand(0)->shape()).c_str(),
- StringifyShape(sort->operand(1)->shape()).c_str());
+ StringifyShape(sort->operand(0)->shape()),
+ StringifyShape(sort->operand(1)->shape()));
}
return CheckVariadicShape(sort);
}
@@ -333,7 +330,7 @@ Status ShapeVerifier::HandleFusion(HloInstruction* fusion) {
int64 param_no = fused_param->parameter_number();
if (!ShapesSame(fused_param->shape(), fusion->operand(param_no)->shape())) {
return InternalError(
- "Shape mismatch between parameter number %lld and its operand in "
+ "Shape mismatch between parameter number %d and its operand in "
"%s.",
param_no, fusion->ToString().c_str());
}
@@ -425,7 +422,7 @@ Status ShapeVerifier::HandleWhile(HloInstruction* xla_while) {
return InternalError(
"Conditional computation shape does not lead to a scalar predicate "
"shape: %s",
- StringifyShape(conditional_shape).c_str());
+ StringifyShape(conditional_shape));
}
// The shape of kWhile should match the shape of the body computation it
// calls.
@@ -556,7 +553,7 @@ Status CheckMixedPrecisionOperands(const HloInstruction* instruction) {
return InternalError(
"Seen floating point types of different precisions in "
"%s, but mixed precision is disallowed.",
- instruction->ToString().c_str());
+ instruction->ToString());
}
return Status::OK();
}));
@@ -646,9 +643,8 @@ Status ShapeVerifier::CheckShape(const HloInstruction* instruction,
return InternalError(
"Expected instruction to have shape equal to %s, actual "
"shape is %s:\n%s",
- StringifyShape(inferred_shape).c_str(),
- StringifyShape(instruction->shape()).c_str(),
- instruction->ToString().c_str());
+ StringifyShape(inferred_shape), StringifyShape(instruction->shape()),
+ instruction->ToString());
}
return Status::OK();
}
@@ -713,23 +709,23 @@ Status VerifyHloStructure(HloModule* module) {
for (const HloComputation* computation : module->computations()) {
if (computation->parent() == nullptr) {
return InternalError("Computation %s has a null parent pointer",
- computation->name().c_str());
+ computation->name());
}
if (computation->parent() != module) {
return InternalError(
"Computation %s parent() does not point to parent module",
- computation->name().c_str());
+ computation->name());
}
for (const HloInstruction* instruction : computation->instructions()) {
if (instruction->parent() == nullptr) {
return InternalError("Instruction %s has a null parent pointer",
- instruction->name().c_str());
+ instruction->name());
}
if (instruction->parent() != computation) {
return InternalError(
"Instruction %s parent() does not point to parent computation",
- instruction->name().c_str());
+ instruction->name());
}
}
}
@@ -746,9 +742,8 @@ Status VerifyHloStructure(HloModule* module) {
return InternalError(
"Operand %d (%s) of instruction %s is in a different "
"computation: %s vs %s",
- i, operand->name().c_str(), instruction->name().c_str(),
- operand->parent()->name().c_str(),
- instruction->parent()->name().c_str());
+ i, operand->name(), instruction->name(),
+ operand->parent()->name(), instruction->parent()->name());
}
}
}
@@ -764,7 +759,7 @@ Status HloVerifier::CheckFusionInstruction(HloInstruction* fusion) const {
"Instruction of fused computation does not match expected "
"instruction "
"%s.",
- fusion->ToString().c_str());
+ fusion->ToString());
}
// Fused root instruction and fused parameters must all be owned by the
@@ -778,7 +773,7 @@ Status HloVerifier::CheckFusionInstruction(HloInstruction* fusion) const {
if (fused_root == instruction) {
if (root_owned) {
return InternalError("Root appears more than once in %s.",
- fusion->ToString().c_str());
+ fusion->ToString());
}
root_owned = true;
}
@@ -786,7 +781,7 @@ Status HloVerifier::CheckFusionInstruction(HloInstruction* fusion) const {
if (fused_parameters[i] == instruction) {
if (parameter_owned[i]) {
return InternalError("Parameter appears more than once in %s.",
- fusion->ToString().c_str());
+ fusion->ToString());
}
parameter_owned[i] = true;
}
@@ -794,20 +789,19 @@ Status HloVerifier::CheckFusionInstruction(HloInstruction* fusion) const {
}
if (!root_owned) {
return InternalError("Root not found in computation of %s.",
- fusion->ToString().c_str());
+ fusion->ToString());
}
// Make sure all the parameter_owned entries are set
for (int i = 0; i < parameter_owned.size(); i++) {
if (!parameter_owned[i]) {
return InternalError("Parameter %d not found in computation of %s.", i,
- fusion->ToString().c_str());
+ fusion->ToString());
}
}
// Fused root must have no users.
if (fused_root->user_count() != 0) {
- return InternalError("Root of %s may not have users.",
- fusion->ToString().c_str());
+ return InternalError("Root of %s may not have users.", fusion->ToString());
}
// All uses of fused instructions must be in the fusion computation, and
@@ -817,14 +811,13 @@ Status HloVerifier::CheckFusionInstruction(HloInstruction* fusion) const {
if (instruction != fused_root) {
if (instruction->user_count() == 0) {
return InternalError("Non-root instruction %s in %s must have users.",
- instruction->ToString().c_str(),
- fusion->ToString().c_str());
+ instruction->ToString(), fusion->ToString());
}
for (auto& user : instruction->users()) {
if (fused_computation != user->parent()) {
return InternalError(
"Non-root instruction %s in %s may not have external users.",
- instruction->ToString().c_str(), fusion->ToString().c_str());
+ instruction->ToString(), fusion->ToString());
}
}
}
@@ -837,19 +830,19 @@ Status HloVerifier::CheckFusionInstruction(HloInstruction* fusion) const {
for (auto fused_param : fused_parameters) {
int64 param_no = fused_param->parameter_number();
if (param_no < 0) {
- return InternalError("Unexpected negative parameter number %lld in %s.",
- param_no, fusion->ToString().c_str());
+ return InternalError("Unexpected negative parameter number %d in %s.",
+ param_no, fusion->ToString());
}
if (param_no >= fused_parameters.size()) {
return InternalError(
- "Unexpected parameter number %lld in %s: higher then number of "
+        "Unexpected parameter number %d in %s: higher than number of "
"parameters %lu.",
- param_no, fusion->ToString().c_str(), fused_parameters.size());
+ param_no, fusion->ToString(), fused_parameters.size());
}
if (parameter_numbers[param_no]) {
return InternalError(
- "Did not expect parameter number %lld more than once in %s.",
- param_no, fusion->ToString().c_str());
+ "Did not expect parameter number %d more than once in %s.", param_no,
+ fusion->ToString());
}
parameter_numbers[param_no] = true;
}
@@ -857,7 +850,7 @@ Status HloVerifier::CheckFusionInstruction(HloInstruction* fusion) const {
for (int i = 0; i < parameter_numbers.size(); i++) {
if (!parameter_numbers[i]) {
return InternalError("Did not see parameter number %d in %s.", i,
- fusion->ToString().c_str());
+ fusion->ToString());
}
}
@@ -872,18 +865,18 @@ Status HloVerifier::CheckWhileInstruction(HloInstruction* instruction) {
auto* while_body = instruction->while_body();
if (while_cond->num_parameters() != 1) {
return FailedPrecondition(
- "While condition must have exactly 1 parameter; had %lld : %s",
- while_cond->num_parameters(), while_cond->ToString().c_str());
+ "While condition must have exactly 1 parameter; had %d : %s",
+ while_cond->num_parameters(), while_cond->ToString());
}
if (while_body->num_parameters() != 1) {
return FailedPrecondition(
- "While body must have exactly 1 parameter; had %lld : %s",
- while_body->num_parameters(), while_body->ToString().c_str());
+ "While body must have exactly 1 parameter; had %d : %s",
+ while_body->num_parameters(), while_body->ToString());
}
if (instruction->operand_count() != 1) {
return FailedPrecondition(
- "While loop must have exactly one operand; had %lld : %s",
- instruction->operand_count(), instruction->ToString().c_str());
+ "While loop must have exactly one operand; had %d : %s",
+ instruction->operand_count(), instruction->ToString());
}
return Status::OK();
}
@@ -891,16 +884,14 @@ Status HloVerifier::CheckWhileInstruction(HloInstruction* instruction) {
Status HloVerifier::CheckConditionalInstruction(HloInstruction* instruction) {
if (instruction->true_computation()->num_parameters() != 1) {
return FailedPrecondition(
- "True computation %s of %s must have 1 parameter insted of %lld",
- instruction->true_computation()->name().c_str(),
- instruction->ToString().c_str(),
+          "True computation %s of %s must have 1 parameter instead of %d",
+ instruction->true_computation()->name(), instruction->ToString(),
instruction->true_computation()->num_parameters());
}
if (instruction->false_computation()->num_parameters() != 1) {
return FailedPrecondition(
- "False computation %s of %s must have 1 parameter insted of %lld",
- instruction->false_computation()->name().c_str(),
- instruction->ToString().c_str(),
+          "False computation %s of %s must have 1 parameter instead of %d",
+ instruction->false_computation()->name(), instruction->ToString(),
instruction->false_computation()->num_parameters());
}
return Status::OK();
@@ -915,9 +906,9 @@ Status HloVerifier::CheckElementwiseInstruction(HloInstruction* instruction) {
"Implicit broadcast is not allowed in HLO."
"Found different shapes for instruction %s.\n"
"output: %s\noperand: %s\n",
- HloOpcodeString(instruction->opcode()).c_str(),
- ShapeUtil::HumanString(out_shape).c_str(),
- ShapeUtil::HumanString(operand_shape).c_str());
+ HloOpcodeString(instruction->opcode()),
+ ShapeUtil::HumanString(out_shape),
+ ShapeUtil::HumanString(operand_shape));
}
}
return Status::OK();
@@ -948,7 +939,7 @@ Status VerifyEntryAndExitShapes(const HloModule& module) {
if (ShapeContainsToken(param->shape())) {
return InternalError(
"Entry parameter %d is or contains a token shape: %s", i,
- ShapeUtil::HumanString(param->shape()).c_str());
+ ShapeUtil::HumanString(param->shape()));
}
}
return Status::OK();
@@ -960,9 +951,9 @@ Status CheckSameChannel(const HloInstruction* instr1,
if (instr1->channel_id() != instr2->channel_id()) {
return InternalError(
"Expected to have the same channel id, actual channel ids are: %s "
- "(%lld), %s (%lld)",
- instr1->ToString().c_str(), instr1->channel_id(),
- instr2->ToString().c_str(), instr2->channel_id());
+ "(%d), %s (%d)",
+ instr1->ToString(), instr1->channel_id(), instr2->ToString(),
+ instr2->channel_id());
}
return Status::OK();
}
@@ -983,7 +974,7 @@ Status CheckSameIsHostTransfer(const HloInstruction* instr1,
"Expected instructions to have the same is-host-transfer property: "
"%s, "
"%s ",
- instr1->ToString().c_str(), instr2->ToString().c_str());
+ instr1->ToString(), instr2->ToString());
}
return Status::OK();
}
@@ -1000,12 +991,12 @@ Status VerifySendsAndRecvs(const HloModule& module) {
host_channels.insert({sendrecv->channel_id(), sendrecv});
if (!it_inserted.second) {
return FailedPrecondition(
- "Channel %lld is used for multiple host send/recv instructions: "
+ "Channel %d is used for multiple host send/recv instructions: "
"%s "
"and "
"%s",
- sendrecv->channel_id(), sendrecv->ToString().c_str(),
- it_inserted.first->second->ToString().c_str());
+ sendrecv->channel_id(), sendrecv->ToString(),
+ it_inserted.first->second->ToString());
}
}
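
The verifier hunks drop .c_str() and length modifiers inside calls to helpers such as InternalError and FailedPrecondition, which suggests those helpers now forward their arguments to StrFormat-style formatting. A hypothetical wrapper (ErrorMessage is not the real XLA helper) showing how absl::FormatSpec keeps compile-time checking of format string and arguments in such a forwarding function:

#include <string>

#include "absl/strings/str_format.h"

// Hypothetical stand-in for an error helper; the real XLA helpers differ.
template <typename... Args>
std::string ErrorMessage(const absl::FormatSpec<Args...>& format,
                         const Args&... args) {
  return absl::StrFormat(format, args...);
}

With a literal format string, a mismatched call such as passing a std::string where %d is expected is rejected at compile time rather than failing at runtime.
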
diff --git a/tensorflow/compiler/xla/service/human_readable_profile_builder.cc b/tensorflow/compiler/xla/service/human_readable_profile_builder.cc
index 581b3ce1e0..e76b93107c 100644
--- a/tensorflow/compiler/xla/service/human_readable_profile_builder.cc
+++ b/tensorflow/compiler/xla/service/human_readable_profile_builder.cc
@@ -15,26 +15,26 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/human_readable_profile_builder.h"
#include "absl/strings/str_cat.h"
+#include "absl/strings/str_format.h"
#include "tensorflow/compiler/xla/metric_table_report.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/strings/numbers.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
namespace xla {
using absl::StrAppend;
+using absl::StrAppendFormat;
using absl::StrCat;
-using tensorflow::strings::Appendf;
+using absl::StrFormat;
using tensorflow::strings::HumanReadableElapsedTime;
using tensorflow::strings::HumanReadableNumBytes;
-using tensorflow::strings::Printf;
string HumanReadableProfileBuilder::ToString() const {
string s;
- Appendf(&s, "Execution profile for %s: (%s @ f_nom)\n",
- computation_name_.c_str(),
- HumanReadableElapsedTime(CyclesToSeconds(total_cycles_)).c_str());
+ StrAppendFormat(&s, "Execution profile for %s: (%s @ f_nom)\n",
+ computation_name_,
+ HumanReadableElapsedTime(CyclesToSeconds(total_cycles_)));
int64 cumulative_cycles = 0;
auto print_op = [&](const OpInfo& op, bool is_total = false) {
@@ -56,7 +56,7 @@ string HumanReadableProfileBuilder::ToString() const {
if (op.bytes_accessed > op.cycles) {
bytes_per_cycle = StrCat(HumanReadableNumBytes(bpc), "/cycle");
} else {
- bytes_per_cycle = Printf("%.3fB/cycle", bpc);
+ bytes_per_cycle = StrFormat("%.3fB/cycle", bpc);
}
}
@@ -77,27 +77,24 @@ string HumanReadableProfileBuilder::ToString() const {
// columns in the output.
cycles_percent_str = "100.% 100Σ";
} else {
- cycles_percent_str =
- Printf("%5.2f%% %2.0fΣ", cycles_percent, cumulative_cycles_percent);
+ cycles_percent_str = StrFormat("%5.2f%% %2.0fΣ", cycles_percent,
+ cumulative_cycles_percent);
}
double nsecs = op.cycles / clock_rate_ghz_;
- Appendf(
+ StrAppendFormat(
&s,
- "%15lld cycles (%s) :: %12.1f usec %22s :: %18s :: %18s :: %14s :: "
+ "%15d cycles (%s) :: %12.1f usec %22s :: %18s :: %18s :: %14s :: "
"%16s :: %s\n",
- op.cycles, cycles_percent_str.c_str(), CyclesToMicroseconds(op.cycles),
+ op.cycles, cycles_percent_str, CyclesToMicroseconds(op.cycles),
op.optimal_seconds < 0
? ""
- : Printf("(%12.1f optimal)", op.optimal_seconds * 1e6).c_str(),
- op.flop_count <= 0
- ? ""
- : HumanReadableNumFlops(op.flop_count, nsecs).c_str(),
+ : StrFormat("(%12.1f optimal)", op.optimal_seconds * 1e6),
+ op.flop_count <= 0 ? "" : HumanReadableNumFlops(op.flop_count, nsecs),
op.transcendental_count <= 0
? ""
- : HumanReadableNumTranscendentalOps(op.transcendental_count, nsecs)
- .c_str(),
- bytes_per_sec.c_str(), bytes_per_cycle.c_str(), op.name.c_str());
+ : HumanReadableNumTranscendentalOps(op.transcendental_count, nsecs),
+ bytes_per_sec, bytes_per_cycle, op.name);
};
float optimal_seconds_sum = 0.0;
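
The profile builder switches from Appendf to absl::StrAppendFormat, which appends formatted text to an existing string in place. A small sketch (AppendProfileLine is a made-up name):

#include <string>

#include "absl/strings/str_format.h"

void AppendProfileLine(std::string* s, double usec, const std::string& name) {
  // Appends to *s; floating-point flags like %12.1f behave as in Printf.
  absl::StrAppendFormat(s, "%12.1f usec :: %s\n", usec, name);
}
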
diff --git a/tensorflow/compiler/xla/service/interpreter/platform.cc b/tensorflow/compiler/xla/service/interpreter/platform.cc
index e57a9b3672..c9b40d3c61 100644
--- a/tensorflow/compiler/xla/service/interpreter/platform.cc
+++ b/tensorflow/compiler/xla/service/interpreter/platform.cc
@@ -18,13 +18,13 @@ limitations under the License.
#include <utility>
#include "absl/memory/memory.h"
+#include "absl/strings/str_format.h"
#include "tensorflow/compiler/xla/service/interpreter/executor.h"
#include "tensorflow/stream_executor/device_options.h"
#include "tensorflow/stream_executor/lib/initialize.h"
#include "tensorflow/stream_executor/lib/ptr_util.h"
#include "tensorflow/stream_executor/lib/status.h"
#include "tensorflow/stream_executor/lib/status_macros.h"
-#include "tensorflow/stream_executor/lib/stringprintf.h"
#include "tensorflow/stream_executor/multi_platform_manager.h"
#include "tensorflow/stream_executor/platform.h"
@@ -77,9 +77,9 @@ XlaInterpreterPlatform::GetUncachedExecutor(
if (!init_status.ok()) {
return port::Status{
port::error::INTERNAL,
- port::Printf(
+ absl::StrFormat(
"failed initializing StreamExecutor for device ordinal %d: %s",
- config.ordinal, init_status.ToString().c_str())};
+ config.ordinal, init_status.ToString())};
}
return std::move(executor);
diff --git a/tensorflow/compiler/xla/service/layout_assignment.cc b/tensorflow/compiler/xla/service/layout_assignment.cc
index 5741864282..75d6d22a48 100644
--- a/tensorflow/compiler/xla/service/layout_assignment.cc
+++ b/tensorflow/compiler/xla/service/layout_assignment.cc
@@ -28,6 +28,7 @@ limitations under the License.
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
+#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "tensorflow/compiler/xla/layout_util.h"
#include "tensorflow/compiler/xla/map_util.h"
@@ -51,7 +52,6 @@ limitations under the License.
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
@@ -71,9 +71,8 @@ BufferLayoutConstraint::BufferLayoutConstraint(const Layout& layout,
}
string BufferLayoutConstraint::ToString() const {
- return tensorflow::strings::Printf("BufferLayoutConstraint %s: %s",
- buffer_->ToString().c_str(),
- LayoutUtil::HumanString(layout_).c_str());
+ return absl::StrFormat("BufferLayoutConstraint %s: %s", buffer_->ToString(),
+ LayoutUtil::HumanString(layout_));
}
OperandLayoutConstraint::OperandLayoutConstraint(
@@ -92,15 +91,14 @@ OperandLayoutConstraint::OperandLayoutConstraint(
}
string OperandLayoutConstraint::ToString() const {
- return tensorflow::strings::Printf(
- "OperandLayoutConstraint %s, operand %lld: %s",
- instruction_->name().c_str(), operand_no_,
- shape_layout_.ToString().c_str());
+ return absl::StrFormat("OperandLayoutConstraint %s, operand %d: %s",
+ instruction_->name(), operand_no_,
+ shape_layout_.ToString());
}
string ResultLayoutConstraint::ToString() const {
- return tensorflow::strings::Printf("ResultLayoutConstraint: %s",
- shape_layout_.ToString().c_str());
+ return absl::StrFormat("ResultLayoutConstraint: %s",
+ shape_layout_.ToString());
}
LayoutConstraints::LayoutConstraints(
@@ -168,8 +166,7 @@ Status LayoutConstraints::SetBufferLayout(const Layout& layout,
return FailedPrecondition(
"Layout of buffer %s cannot be constrained because buffer is not "
"array-shaped, has shape: %s",
- buffer.ToString().c_str(),
- ShapeUtil::HumanString(buffer.shape()).c_str());
+ buffer.ToString(), ShapeUtil::HumanString(buffer.shape()));
}
TF_RETURN_IF_ERROR(
LayoutUtil::ValidateLayoutForShape(layout, buffer.shape()));
@@ -185,9 +182,8 @@ Status LayoutConstraints::SetBufferLayout(const Layout& layout,
return FailedPrecondition(
"Buffer %s already has the layout constraint %s, cannot add "
"incompatible constraint %s",
- buffer.ToString().c_str(),
- LayoutUtil::HumanString(curr_constraint.layout()).c_str(),
- LayoutUtil::HumanString(layout).c_str());
+ buffer.ToString(), LayoutUtil::HumanString(curr_constraint.layout()),
+ LayoutUtil::HumanString(layout));
}
iter->second = BufferLayoutConstraint(layout, buffer, mandatory, dfs);
} else {
@@ -221,11 +217,11 @@ Status LayoutConstraints::SetOperandLayout(const Shape& shape_with_layout,
}
if (curr_shape_layout->mandatory()) {
return FailedPrecondition(
- "Operand %lld of instruction %s already has a layout constraint "
+ "Operand %d of instruction %s already has a layout constraint "
"%s, cannot add incompatible constraint %s",
- operand_no, instruction->name().c_str(),
- curr_shape_layout->shape_layout().ToString().c_str(),
- ShapeUtil::HumanStringWithLayout(shape_with_layout).c_str());
+ operand_no, instruction->name(),
+ curr_shape_layout->shape_layout().ToString(),
+ ShapeUtil::HumanStringWithLayout(shape_with_layout));
}
}
@@ -234,9 +230,9 @@ Status LayoutConstraints::SetOperandLayout(const Shape& shape_with_layout,
// layouts beyond this immediate use and is complicated to handle.
if (OperandBufferForwarded(instruction, operand_no)) {
return FailedPrecondition(
- "Cannot constraint layout of operand %lld of instruction %s "
+        "Cannot constrain layout of operand %d of instruction %s "
"because instruction forwards operand's LogicalBuffer(s)",
- operand_no, instruction->name().c_str());
+ operand_no, instruction->name());
}
auto key = std::make_pair(instruction, operand_no);
@@ -278,8 +274,8 @@ Status LayoutConstraints::SetResultLayout(const Shape& shape_with_layout,
return FailedPrecondition(
"Result of computation %s already has the layout constraint %s, "
"cannot add incompatible constraint %s",
- computation_->name().c_str(), curr_shape_layout->ToString().c_str(),
- ShapeUtil::HumanStringWithLayout(shape_with_layout).c_str());
+ computation_->name(), curr_shape_layout->ToString(),
+ ShapeUtil::HumanStringWithLayout(shape_with_layout));
}
// New constraint matches existing constraint. Nothing to do.
return Status::OK();
@@ -301,9 +297,8 @@ Status LayoutConstraints::SetInstructionLayout(
if (!ShapeUtil::Compatible(shape_with_layout, instruction->shape())) {
return FailedPrecondition(
"Instruction %s of shape %s cannot be assigned incompatible layout %s",
- instruction->name().c_str(),
- ShapeUtil::HumanString(instruction->shape()).c_str(),
- ShapeUtil::HumanStringWithLayout(shape_with_layout).c_str());
+ instruction->name(), ShapeUtil::HumanString(instruction->shape()),
+ ShapeUtil::HumanStringWithLayout(shape_with_layout));
}
// Create a BufferLayoutConstraint for each array shape in the output of the
@@ -753,7 +748,7 @@ Status CheckParameterLayout(HloInstruction* parameter,
return InternalError(
"parameter instruction %s does not match layout of computation "
"shape: %s",
- parameter->ToString().c_str(), parameter_layout.ToString().c_str());
+ parameter->ToString(), parameter_layout.ToString());
}
return Status::OK();
}
@@ -764,8 +759,8 @@ Status CheckConstantLayout(HloInstruction* constant) {
constant->shape())) {
return InternalError(
"constant instruction %s does not match the layout of its literal %s",
- constant->ToString().c_str(),
- ShapeUtil::HumanStringWithLayout(constant->literal().shape()).c_str());
+ constant->ToString(),
+ ShapeUtil::HumanStringWithLayout(constant->literal().shape()));
}
return Status::OK();
}
@@ -898,13 +893,10 @@ Status LayoutAssignment::CheckLayouts(HloModule* module) {
return InternalError(
"Layout of instruction %s at index {%s} does not match "
"source LogicalBuffer %s: %s vs %s",
- instruction->name().c_str(),
- absl::StrJoin(index, ",").c_str(),
- buffer->ToString().c_str(),
- ShapeUtil::HumanStringWithLayout(instruction_subshape)
- .c_str(),
- ShapeUtil::HumanStringWithLayout(buffer->shape())
- .c_str());
+ instruction->name(), absl::StrJoin(index, ","),
+ buffer->ToString(),
+ ShapeUtil::HumanStringWithLayout(instruction_subshape),
+ ShapeUtil::HumanStringWithLayout(buffer->shape()));
}
}
}
@@ -1375,7 +1367,7 @@ StatusOr<Layout> InferArrayLayout(
// This should not happen because we've assigned layouts to all
// instructions preceding this one.
return InternalError("LogicalBuffer %s does not have a layout",
- source_buffer->ToString().c_str());
+ source_buffer->ToString());
}
if (first_buffer_layout == nullptr) {
@@ -1390,9 +1382,8 @@ StatusOr<Layout> InferArrayLayout(
return FailedPrecondition(
"Array at index {%s} in instruction %s aliases buffers %s "
"and %s which have different layouts",
- absl::StrJoin(index, ",").c_str(), instruction->name().c_str(),
- source_buffers[0]->ToString().c_str(),
- source_buffer->ToString().c_str());
+ absl::StrJoin(index, ","), instruction->name(),
+ source_buffers[0]->ToString(), source_buffer->ToString());
}
}
@@ -1560,7 +1551,7 @@ Status LayoutAssignment::ClearComputationLayouts(HloComputation* computation) {
// present in the IR before layout assignment is a bug.
return InternalError(
"Unexpected bitcast operation seen during layout assignment: %s.",
- instruction->ToString().c_str());
+ instruction->ToString());
}
if (instruction->opcode() != HloOpcode::kInfeed) {
LayoutUtil::ClearLayout(instruction->mutable_shape());
diff --git a/tensorflow/compiler/xla/service/llvm_ir/BUILD b/tensorflow/compiler/xla/service/llvm_ir/BUILD
index fc3289f30d..786448ea76 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/BUILD
+++ b/tensorflow/compiler/xla/service/llvm_ir/BUILD
@@ -125,6 +125,7 @@ cc_library(
"//tensorflow/compiler/xla:types",
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/core:lib",
+ "@com_google_absl//absl/strings:str_format",
"@llvm//:core",
],
)
diff --git a/tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.cc b/tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.cc
index 72ede377e1..6d637cad6d 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.cc
+++ b/tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.cc
@@ -98,7 +98,7 @@ Status FusedIrEmitter::HandleGetTupleElement(
return Unimplemented(
"GetTupleElement fusion currently only supports"
" parameter operands, but found operand: %s",
- operand->name().c_str());
+ operand->name());
}
// Emit code to lookup tuple element pointer, and store it in 'gte_values_'.
llvm::Value* tuple_element_ptr = llvm_ir::EmitGetTupleElement(
diff --git a/tensorflow/compiler/xla/service/llvm_ir/llvm_loop.cc b/tensorflow/compiler/xla/service/llvm_ir/llvm_loop.cc
index 978fa5b453..2f6720b042 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/llvm_loop.cc
+++ b/tensorflow/compiler/xla/service/llvm_ir/llvm_loop.cc
@@ -26,7 +26,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/service/llvm_ir/loop_emitter.cc b/tensorflow/compiler/xla/service/llvm_ir/loop_emitter.cc
index cf7445804c..1553b4fc91 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/loop_emitter.cc
+++ b/tensorflow/compiler/xla/service/llvm_ir/loop_emitter.cc
@@ -18,13 +18,13 @@ limitations under the License.
#include <memory>
#include <utility>
+#include "absl/strings/str_format.h"
#include "tensorflow/compiler/xla/service/llvm_ir/llvm_loop.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/core/errors.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/types.h"
@@ -105,7 +105,7 @@ std::vector<IrArray::Index> LoopEmitter::EmitIndexAndSetExitBasicBlock(
std::unique_ptr<ForLoop> loop = loop_nest.AddLoop(
/*start_index=*/0,
/*end_index=*/shape_.dimensions(dimension),
- /*suffix=*/tensorflow::strings::Printf("dim.%lld", dimension));
+ /*suffix=*/absl::StrFormat("dim.%d", dimension));
array_index[dimension] = loop->GetIndVarValue();
}
diff --git a/tensorflow/compiler/xla/service/local_service.cc b/tensorflow/compiler/xla/service/local_service.cc
index ea59adadea..768105d9e1 100644
--- a/tensorflow/compiler/xla/service/local_service.cc
+++ b/tensorflow/compiler/xla/service/local_service.cc
@@ -21,6 +21,7 @@ limitations under the License.
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
+#include "absl/strings/str_format.h"
#include "tensorflow/compiler/xla/client/executable_build_options.h"
#include "tensorflow/compiler/xla/client/xla_computation.h"
#include "tensorflow/compiler/xla/execution_options_util.h"
@@ -149,7 +150,7 @@ StatusOr<std::unique_ptr<Executable>> LocalService::CompileExecutable(
// Validate incoming layouts.
if (argument_layouts.size() != program_shape.parameters_size()) {
return InvalidArgument(
- "Invalid number of arguments for computation: expected %d, got %zu.",
+ "Invalid number of arguments for computation: expected %d, got %u.",
program_shape.parameters_size(), argument_layouts.size());
}
@@ -167,16 +168,15 @@ StatusOr<std::unique_ptr<Executable>> LocalService::CompileExecutable(
CHECK(metadata.value() != nullptr);
const OpMetadata& m = *metadata.value();
if (!m.source_file().empty()) {
- return tensorflow::strings::Printf(
- " (%s:%d)", m.source_file().c_str(), m.source_line());
+ return absl::StrFormat(" (%s:%d)", m.source_file(), m.source_line());
}
return "";
};
return InvalidArgument(
"Invalid argument shape for argument %d%s, expected %s, got %s.", i,
- metadata_string().c_str(),
- ShapeUtil::HumanString(program_shape.parameters(i)).c_str(),
- ShapeUtil::HumanString(argument_shape).c_str());
+ metadata_string(),
+ ShapeUtil::HumanString(program_shape.parameters(i)),
+ ShapeUtil::HumanString(argument_shape));
}
}
if (build_options.result_layout() != nullptr) {
@@ -214,7 +214,7 @@ StatusOr<const ShapedBuffer*> LocalService::GlobalDataToShapedBuffer(
TF_ASSIGN_OR_RETURN(auto buffers, allocation_tracker_.Resolve(data));
if (replica_number >= buffers.size()) {
return InvalidArgument(
- "replica_number %d out of range; must be less than num_replicas = %zu.",
+ "replica_number %d out of range; must be less than num_replicas = %u.",
replica_number, buffers.size());
}
return buffers[replica_number];
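
The %zu conversions above become plain %u because StrFormat keys off the argument's type rather than a length modifier, so size_t values need no special treatment. An illustrative sketch (ArgumentCountError is hypothetical):

#include <string>
#include <vector>

#include "absl/strings/str_format.h"

std::string ArgumentCountError(int expected, const std::vector<int>& args) {
  // args.size() is a size_t; %u formats it without a z or l qualifier.
  return absl::StrFormat("expected %d arguments, got %u.", expected,
                         args.size());
}
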
diff --git a/tensorflow/compiler/xla/service/platform_util.cc b/tensorflow/compiler/xla/service/platform_util.cc
index 150af0cd93..ae1e13d8a6 100644
--- a/tensorflow/compiler/xla/service/platform_util.cc
+++ b/tensorflow/compiler/xla/service/platform_util.cc
@@ -98,7 +98,7 @@ PlatformUtil::GetSupportedPlatforms() {
[](string* out, const se::Platform* p) { out->append(p->Name()); });
return InvalidArgument(
"must specify platform because more than one platform found: %s",
- platforms_string.c_str());
+ platforms_string);
}
/* static */ StatusOr<se::Platform*> PlatformUtil::GetDefaultPlatform() {
@@ -123,7 +123,7 @@ PlatformUtil::GetSupportedPlatforms() {
return InvalidArgument(
"must specify platform because more than one platform (except for the "
"interpreter platform) found: %s",
- platforms_string.c_str());
+ platforms_string);
}
/*static*/ StatusOr<se::Platform*> PlatformUtil::GetPlatform(
@@ -135,7 +135,7 @@ PlatformUtil::GetSupportedPlatforms() {
return platform;
}
}
- return InvalidArgument("platform %s not found", platform_name.c_str());
+ return InvalidArgument("platform %s not found", platform_name);
}
/*static*/ StatusOr<se::Platform*> PlatformUtil::GetPlatformExceptFor(
@@ -151,7 +151,7 @@ PlatformUtil::GetSupportedPlatforms() {
}
if (matched.empty()) {
return InvalidArgument("unable to find platform that is not %s",
- platform_name.c_str());
+ platform_name);
}
if (matched.size() == 1) {
return matched[0];
@@ -161,7 +161,7 @@ PlatformUtil::GetSupportedPlatforms() {
[](string* out, const se::Platform* p) { out->append(p->Name()); });
return InvalidArgument(
"found multiple platforms %s, but expected one platform except for %s",
- matched_string.c_str(), platform_name.c_str());
+ matched_string, platform_name);
}
// Returns whether the device underlying the given StreamExecutor is supported
@@ -192,7 +192,7 @@ static bool IsDeviceSupported(se::StreamExecutor* executor) {
PlatformUtil::GetStreamExecutors(se::Platform* platform) {
int device_count = platform->VisibleDeviceCount();
if (device_count <= 0) {
- return NotFound("no %s devices found", platform->Name().c_str());
+ return NotFound("no %s devices found", platform->Name());
}
if (platform->id() == se::host::kHostPlatformId) {
// On host "devices", StreamExecutor exports a device for each hardware
@@ -231,7 +231,7 @@ PlatformUtil::GetStreamExecutors(se::Platform* platform) {
if (std::all_of(stream_executors.begin(), stream_executors.end(),
[](se::StreamExecutor* s) { return s == nullptr; })) {
return InternalError("no supported devices found for platform %s",
- platform->Name().c_str());
+ platform->Name());
}
return stream_executors;
}
diff --git a/tensorflow/compiler/xla/service/scatter_expander.cc b/tensorflow/compiler/xla/service/scatter_expander.cc
index 338f0c09e9..2077b57c05 100644
--- a/tensorflow/compiler/xla/service/scatter_expander.cc
+++ b/tensorflow/compiler/xla/service/scatter_expander.cc
@@ -291,7 +291,7 @@ StatusOr<HloInstruction*> ScatterExpander::ExpandScatter(
return Unimplemented(
"Scatter operations with more than 2147483647 scatter indices are not "
"supported. This error occurred for %s.",
- scatter->ToString().c_str());
+ scatter->ToString());
}
// Canonicalize the scatter_indices, after which the size of its most-major
diff --git a/tensorflow/compiler/xla/service/service.cc b/tensorflow/compiler/xla/service/service.cc
index d39a5191b8..e10c1d9927 100644
--- a/tensorflow/compiler/xla/service/service.cc
+++ b/tensorflow/compiler/xla/service/service.cc
@@ -22,6 +22,7 @@ limitations under the License.
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
+#include "absl/strings/str_format.h"
#include "tensorflow/compiler/xla/execution_options_util.h"
#include "tensorflow/compiler/xla/layout_util.h"
#include "tensorflow/compiler/xla/legacy_flags/debug_options_flags.h"
@@ -47,7 +48,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
@@ -55,13 +55,12 @@ limitations under the License.
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/ptr_util.h"
-using absl::StrCat;
-using ::tensorflow::strings::Printf;
-
namespace xla {
-
namespace {
+using absl::StrCat;
+using absl::StrFormat;
+
// Records the arguments used to invoke a computation in an HloSnapshot proto.
Status RecordArguments(
const tensorflow::gtl::ArraySlice<const ShapedBuffer*> arguments,
@@ -148,19 +147,19 @@ Service::Service(const ServiceOptions& options,
CHECK_GE(execute_backend_->device_count(), options_.number_of_replicas())
<< "Requested more replicas than there are devices.";
}
- LOG(INFO) << Printf(
+ LOG(INFO) << StrFormat(
"XLA service %p executing computations on platform %s. Devices:", this,
- execute_backend_->platform()->Name().c_str());
+ execute_backend_->platform()->Name());
for (int i = 0; i < execute_backend_->device_count(); ++i) {
if (execute_backend_->device_ordinal_supported(i)) {
se::StreamExecutor* executor =
execute_backend_->stream_executor(i).ValueOrDie();
const auto& description = executor->GetDeviceDescription();
- LOG(INFO) << Printf(" StreamExecutor device (%d): %s, %s", i,
- description.name().c_str(),
- description.platform_version().c_str());
+ LOG(INFO) << StrFormat(" StreamExecutor device (%d): %s, %s", i,
+ description.name(),
+ description.platform_version());
} else {
- LOG(INFO) << Printf(" StreamExecutor device (%d) not supported", i);
+ LOG(INFO) << StrFormat(" StreamExecutor device (%d) not supported", i);
}
}
} else {
@@ -200,8 +199,8 @@ Status Service::ValidateResultShape(const Shape& client_shape,
return InvalidArgument(
"Shape used to set computation result layout %s is not compatible "
"with result shape %s",
- ShapeUtil::HumanStringWithLayout(client_shape).c_str(),
- ShapeUtil::HumanString(result_shape).c_str());
+ ShapeUtil::HumanStringWithLayout(client_shape),
+ ShapeUtil::HumanString(result_shape));
}
return Status::OK();
}
@@ -231,9 +230,9 @@ Service::ResolveAndValidateArguments(
return InvalidArgument(
"argument %lu is on device %s:%d but computation will be executed "
"on device %s",
- i, shaped_buffer->platform()->Name().c_str(),
+ i, shaped_buffer->platform()->Name(),
shaped_buffer->device_ordinal(),
- execute_backend_->device_name(replica_device_ordinal).c_str());
+ execute_backend_->device_name(replica_device_ordinal));
}
replicated_arguments[replica].push_back(shaped_buffer);
}
@@ -249,7 +248,7 @@ StatusOr<std::unique_ptr<HloModuleConfig>> Service::CreateModuleConfig(
ComputationLayout* computation_layout =
config->mutable_entry_computation_layout();
if (program_shape.parameters_size() != argument_shapes.size()) {
- return InvalidArgument("computation takes %d parameters, but %zu given",
+ return InvalidArgument("computation takes %d parameters, but %u given",
program_shape.parameters_size(),
argument_shapes.size());
}
@@ -261,8 +260,8 @@ StatusOr<std::unique_ptr<HloModuleConfig>> Service::CreateModuleConfig(
return InvalidArgument(
"Argument does not match shape of computation parameter %d: want "
"%s, got %s",
- i, ShapeUtil::HumanString(program_shape.parameters(i)).c_str(),
- ShapeUtil::HumanString(*argument_shapes[i]).c_str());
+ i, ShapeUtil::HumanString(program_shape.parameters(i)),
+ ShapeUtil::HumanString(*argument_shapes[i]));
}
TF_RETURN_IF_ERROR(
computation_layout->mutable_parameter_layout(i)->CopyLayoutFromShape(
@@ -314,7 +313,7 @@ StatusOr<std::vector<std::unique_ptr<Executable>>> Service::BuildExecutables(
std::vector<std::unique_ptr<HloModuleConfig>> module_configs,
Backend* backend, std::vector<std::vector<se::StreamExecutor*>> executors,
DeviceMemoryAllocator* device_allocator) {
- VLOG(1) << Printf("BuildExecutable on service %p", this);
+ VLOG(1) << StrFormat("BuildExecutable on service %p", this);
// Dump computation proto state if flag is set.
std::vector<std::unique_ptr<HloSnapshot>> hlo_snapshots;
@@ -329,9 +328,8 @@ StatusOr<std::vector<std::unique_ptr<Executable>>> Service::BuildExecutables(
auto hlo_snapshot = absl::make_unique<HloSnapshot>();
*hlo_snapshot->mutable_hlo()->mutable_hlo_module() = *module_protos[i];
if (!directory_path.empty()) {
- string filename =
- Printf("computation_%lld__%s", module_protos[i]->id(),
- module_protos[i]->entry_computation_name().c_str());
+ string filename = StrFormat("computation_%d__%s", module_protos[i]->id(),
+ module_protos[i]->entry_computation_name());
TF_RETURN_IF_ERROR(
Executable::DumpToDirectory(directory_path, filename, *hlo_snapshot));
}
@@ -454,8 +452,8 @@ Service::ExecuteParallelAndRegisterResult(
for (int64 i = 0; i < streams.size(); ++i) {
Status block_status = streams[i]->BlockHostUntilDone();
if (!block_status.ok()) {
- return InternalError("failed to complete execution for stream %lld: %s",
- i, block_status.error_message().c_str());
+ return InternalError("failed to complete execution for stream %d: %s", i,
+ block_status.error_message());
}
}
@@ -580,7 +578,7 @@ StatusOr<std::vector<se::StreamExecutor*>> Service::GetExecutors(
if (requests_size > 1 && execution_options.device_handles_size() > 1) {
return InvalidArgument(
"Parallel requests with multiple device handles is not supported. "
- "Found %lld parallel requests, with request %lld containing %d device "
+ "Found %d parallel requests, with request %d containing %d device "
"handles.",
requests_size, request_index, execution_options.device_handles_size());
}
@@ -745,8 +743,8 @@ Status Service::GetDeviceHandles(const GetDeviceHandlesRequest* arg,
}
if (available_device_count < arg->device_count() * replica_count) {
return ResourceExhausted(
- "Requested device count (%lld) exceeds the number of available devices "
- "on the target (%lld)",
+ "Requested device count (%d) exceeds the number of available devices "
+ "on the target (%d)",
arg->device_count(), available_device_count);
}
@@ -796,9 +794,9 @@ StatusOr<std::unique_ptr<Executable>> Service::BuildExecutable(
const HloModuleProto& module_proto,
std::unique_ptr<HloModuleConfig> module_config, Backend* backend,
se::StreamExecutor* executor, DeviceMemoryAllocator* device_allocator) {
- VLOG(1) << Printf(
+ VLOG(1) << StrFormat(
"BuildExecutable on service %p with serialized module proto: %s", this,
- module_proto.name().c_str());
+ module_proto.name());
// Dump computation proto state if flag is set.
auto hlo_snapshot = absl::make_unique<HloSnapshot>();
@@ -809,8 +807,8 @@ StatusOr<std::unique_ptr<Executable>> Service::BuildExecutable(
if (!directory_path.empty() || !execution_directory_path.empty()) {
*hlo_snapshot->mutable_hlo()->mutable_hlo_module() = module_proto;
if (!directory_path.empty()) {
- string filename = Printf("computation_%lld__%s", module_proto.id(),
- module_proto.entry_computation_name().c_str());
+ string filename = StrFormat("computation_%d__%s", module_proto.id(),
+ module_proto.entry_computation_name());
TF_RETURN_IF_ERROR(
Executable::DumpToDirectory(directory_path, filename, *hlo_snapshot));
}
@@ -1010,8 +1008,7 @@ Status Service::TransferToInfeed(const TransferToInfeedRequest* arg,
"%s",
StrCat("The replica_id=", arg->replica_id(),
" on TransferToInfeedRequest not in range [0, replica_count=",
- replica_count, ").")
- .c_str());
+ replica_count, ")."));
}
se::StreamExecutor* executor;
@@ -1037,8 +1034,7 @@ Status Service::TransferFromOutfeed(const TransferFromOutfeedRequest* arg,
const int64 replica_count = options_.number_of_replicas();
if (arg->replica_id() < 0 || arg->replica_id() >= replica_count) {
return FailedPrecondition(
- "The replica_id=%lld on TransferFromOutfeedRequest not in range [0, "
- "%lld)",
+ "The replica_id=%d on TransferFromOutfeedRequest not in range [0, %d)",
arg->replica_id(), replica_count);
}
diff --git a/tensorflow/compiler/xla/service/shape_inference.cc b/tensorflow/compiler/xla/service/shape_inference.cc
index 6a22f8bef4..ae6a366d25 100644
--- a/tensorflow/compiler/xla/service/shape_inference.cc
+++ b/tensorflow/compiler/xla/service/shape_inference.cc
@@ -23,6 +23,7 @@ limitations under the License.
#include "absl/algorithm/container.h"
#include "absl/strings/str_cat.h"
+#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "tensorflow/compiler/xla/shape_util.h"
@@ -34,15 +35,14 @@ limitations under the License.
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/flatset.h"
#include "tensorflow/core/lib/math/math_util.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
namespace xla {
namespace {
+using absl::StrFormat;
using absl::StrJoin;
-using tensorflow::strings::Printf;
// Returns true if no element is present in slice more than once.
bool AllUnique(tensorflow::gtl::ArraySlice<int64> slice) {
@@ -52,8 +52,7 @@ bool AllUnique(tensorflow::gtl::ArraySlice<int64> slice) {
Status ExpectArray(const Shape& shape, absl::string_view op_type) {
if (!ShapeUtil::IsArray(shape)) {
return InvalidArgument("Expected array argument for %s, but got %s.",
- std::string(op_type).c_str(),
- ShapeUtil::HumanString(shape).c_str());
+ std::string(op_type), ShapeUtil::HumanString(shape));
}
return Status::OK();
}
@@ -65,7 +64,7 @@ Status VerifyReducerShape(
int64 inputs) {
if (reducer_shape.parameters_size() != inputs * 2) {
return InvalidArgument(
- "Reduction function must take %lld parameters, but "
+ "Reduction function must take %d parameters, but "
"takes %d parameter(s).",
inputs * 2, reducer_shape.parameters_size());
}
@@ -75,7 +74,7 @@ Status VerifyReducerShape(
if (ShapeUtil::IsArray(accumulator_shape)) {
if (inputs != 1) {
return InvalidArgument(
- "Reduction function must produce a tuple with %lld elements, but "
+ "Reduction function must produce a tuple with %d elements, but "
"produces a scalar",
inputs);
}
@@ -83,8 +82,8 @@ Status VerifyReducerShape(
} else if (ShapeUtil::IsTuple(accumulator_shape)) {
if (ShapeUtil::TupleElementCount(accumulator_shape) != inputs) {
return InvalidArgument(
- "Reduction function must produce a tuple with %lld elements, but has "
- "%lld elements",
+ "Reduction function must produce a tuple with %d elements, but has "
+ "%d elements",
inputs, ShapeUtil::TupleElementCount(accumulator_shape));
}
for (const Shape& element_shape : accumulator_shape.tuple_shapes()) {
@@ -94,7 +93,7 @@ Status VerifyReducerShape(
return InvalidArgument(
"Reduction function must produce a scalar or tuple of scalars, but has "
"shape: %s",
- ShapeUtil::HumanString(accumulator_shape).c_str());
+ ShapeUtil::HumanString(accumulator_shape));
}
for (const Shape* element_shape : accumulator_subshapes) {
@@ -102,7 +101,7 @@ Status VerifyReducerShape(
return InvalidArgument(
"Reduction function must return a scalar or tuple of scalars but "
"returns shape: %s",
- ShapeUtil::HumanString(accumulator_shape).c_str());
+ ShapeUtil::HumanString(accumulator_shape));
}
}
@@ -113,19 +112,19 @@ Status VerifyReducerShape(
if (!ShapeUtil::Compatible(*accumulator_subshapes[i],
reducer_shape.parameters(i))) {
return InvalidArgument(
- "Reduction function's %lld-th parameter shape differs from the "
+ "Reduction function's %d-th parameter shape differs from the "
"result shape: %s vs %s",
- i, ShapeUtil::HumanString(reducer_shape.parameters(i)).c_str(),
- ShapeUtil::HumanString(*accumulator_subshapes[i]).c_str());
+ i, ShapeUtil::HumanString(reducer_shape.parameters(i)),
+ ShapeUtil::HumanString(*accumulator_subshapes[i]));
}
// Check that init_value's shapes are suitable for reducer_shape.
if (!ShapeUtil::CompatibleIgnoringFpPrecision(*accumulator_subshapes[i],
*init_value_shapes[i])) {
return InvalidArgument(
- "Reduction function's accumulator shape at index %lld differs from "
+ "Reduction function's accumulator shape at index %d differs from "
"the init_value shape: %s vs %s",
- i, ShapeUtil::HumanString(*accumulator_subshapes[i]).c_str(),
- ShapeUtil::HumanString(*init_value_shapes[i]).c_str());
+ i, ShapeUtil::HumanString(*accumulator_subshapes[i]),
+ ShapeUtil::HumanString(*init_value_shapes[i]));
}
// Check that the inputs can be passed in as the non-accumulator arguments.
const Shape input_element_shape =
@@ -133,11 +132,11 @@ Status VerifyReducerShape(
if (!ShapeUtil::CompatibleIgnoringFpPrecision(
input_element_shape, reducer_shape.parameters(inputs + i))) {
return InvalidArgument(
- "Reduction function's %lld-th parameter shape differs from the "
+ "Reduction function's %d-th parameter shape differs from the "
"input type element type: %s vs %s",
inputs + i,
- ShapeUtil::HumanString(reducer_shape.parameters(inputs + i)).c_str(),
- ShapeUtil::HumanString(input_element_shape).c_str());
+ ShapeUtil::HumanString(reducer_shape.parameters(inputs + i)),
+ ShapeUtil::HumanString(input_element_shape));
}
// Check that the accumulator and inputs to the reducer function match.
// If the accumulator is scalar, it must have the same type as the inputs
@@ -147,11 +146,11 @@ Status VerifyReducerShape(
if (!ShapeUtil::CompatibleIgnoringFpPrecision(
*accumulator_subshapes[i], reducer_shape.parameters(inputs + i))) {
return InvalidArgument(
- "Reduction function's %lld-th parameter shape must "
+ "Reduction function's %d-th parameter shape must "
"match the result shape, but got %s vs %s.",
inputs + i,
- ShapeUtil::HumanString(reducer_shape.parameters(inputs + i)).c_str(),
- ShapeUtil::HumanString(*accumulator_subshapes[i]).c_str());
+ ShapeUtil::HumanString(reducer_shape.parameters(inputs + i)),
+ ShapeUtil::HumanString(*accumulator_subshapes[i]));
}
}
@@ -164,7 +163,7 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
bool allow_negative_padding) {
if (window.dimensions_size() != ShapeUtil::Rank(base_shape)) {
return InvalidArgument(
- "Window has dimension %d but base shape has dimension %lld.",
+ "Window has dimension %d but base shape has dimension %d.",
window.dimensions_size(), ShapeUtil::Rank(base_shape));
}
@@ -173,29 +172,29 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
const auto& dim = window.dimensions(i);
if (dim.size() <= 0) {
return InvalidArgument("Window %s has a non-positive dimension.",
- window.DebugString().c_str());
+ window.DebugString());
}
if (dim.stride() <= 0) {
return InvalidArgument("Window %s has a non-positive stride.",
- window.DebugString().c_str());
+ window.DebugString());
}
if (!allow_negative_padding && dim.padding_low() < 0) {
return InvalidArgument("Window %s has a negative low padding.",
- window.DebugString().c_str());
+ window.DebugString());
}
if (!allow_negative_padding && dim.padding_high() < 0) {
return InvalidArgument("Window %s has a negative high padding.",
- window.DebugString().c_str());
+ window.DebugString());
}
if (dim.base_dilation() < 1) {
return InvalidArgument(
"Window %s has a non-positive base area dilation factor.",
- window.DebugString().c_str());
+ window.DebugString());
}
if (dim.window_dilation() < 1) {
return InvalidArgument(
"Window %s has a non-positive window dilation factor.",
- window.DebugString().c_str());
+ window.DebugString());
}
const int64 dilated_base = window_util::DilatedBound(
@@ -238,8 +237,7 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
return InvalidArgument(
"Expected element type in shape to be floating for %s operation; "
"got %s.",
- HloOpcodeString(opcode).c_str(),
- PrimitiveType_Name(shape.element_type()).c_str());
+ HloOpcodeString(opcode), PrimitiveType_Name(shape.element_type()));
}
return shape;
case HloOpcode::kCos:
@@ -254,8 +252,7 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
return InvalidArgument(
"Expected element type in shape to be floating or complex for %s "
"operation; got %s.",
- HloOpcodeString(opcode).c_str(),
- PrimitiveType_Name(shape.element_type()).c_str());
+ HloOpcodeString(opcode), PrimitiveType_Name(shape.element_type()));
}
return shape;
case HloOpcode::kReal:
@@ -268,8 +265,7 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
return InvalidArgument(
"Expected element type in shape to be floating or complex for "
"%s operation; got %s.",
- HloOpcodeString(opcode).c_str(),
- PrimitiveType_Name(shape.element_type()).c_str());
+ HloOpcodeString(opcode), PrimitiveType_Name(shape.element_type()));
}
case HloOpcode::kAbs:
if (ShapeUtil::ElementIsComplex(shape)) {
@@ -281,15 +277,14 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
return InvalidArgument(
"Expected element type in shape to be floating or complex for "
"%s operation; got %s.",
- HloOpcodeString(opcode).c_str(),
- PrimitiveType_Name(shape.element_type()).c_str());
+ HloOpcodeString(opcode), PrimitiveType_Name(shape.element_type()));
}
case HloOpcode::kClz:
if (!ShapeUtil::ElementIsIntegral(shape)) {
return InvalidArgument(
"Expected an integral element type in argument to Clz "
"operation; got %s.",
- PrimitiveType_Name(shape.element_type()).c_str());
+ PrimitiveType_Name(shape.element_type()));
}
return shape;
case HloOpcode::kNegate:
@@ -299,8 +294,7 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
return InvalidArgument(
"Expected element type in shape to be integral, floating or "
"complex for %s operation; got %s.",
- HloOpcodeString(opcode).c_str(),
- PrimitiveType_Name(shape.element_type()).c_str());
+ HloOpcodeString(opcode), PrimitiveType_Name(shape.element_type()));
}
return shape;
case HloOpcode::kSign:
@@ -309,8 +303,7 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
return InvalidArgument(
"Expected element type in shape to be signed or complex for "
"%s operation; got %s.",
- HloOpcodeString(opcode).c_str(),
- PrimitiveType_Name(shape.element_type()).c_str());
+ HloOpcodeString(opcode), PrimitiveType_Name(shape.element_type()));
}
return shape;
@@ -320,7 +313,7 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
return InvalidArgument(
"Expected pred or an integral element type in argument to Not "
"operation; got %s.",
- PrimitiveType_Name(shape.element_type()).c_str());
+ PrimitiveType_Name(shape.element_type()));
}
return shape;
@@ -330,14 +323,14 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
"Expected element type in shape to be floating "
"point for IsFinite "
"operation; got %s.",
- PrimitiveType_Name(shape.element_type()).c_str());
+ PrimitiveType_Name(shape.element_type()));
}
return ShapeUtil::ChangeElementType(shape, PRED);
default:
return InvalidArgument(
"Unknown operation for unary shape inference: \"%s\".",
- HloOpcodeString(opcode).c_str());
+ HloOpcodeString(opcode));
}
}
@@ -348,7 +341,7 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
return InvalidArgument("Concatenate expects at least one argument.");
}
if (dimension < 0 || dimension >= ShapeUtil::Rank(*arg_shapes[0])) {
- return InvalidArgument("Concatenate dimension out of bounds: %lld.",
+ return InvalidArgument("Concatenate dimension out of bounds: %d.",
dimension);
}
const Shape* arg_shape = nullptr;
@@ -362,17 +355,16 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
}
if (ShapeUtil::Rank(*arg_shape) != ShapeUtil::Rank(*shape)) {
return InvalidArgument(
- "Cannot concatenate arrays with different ranks: %lld (%s) vs %lld "
+ "Cannot concatenate arrays with different ranks: %d (%s) vs %d "
"(%s).",
- ShapeUtil::Rank(*arg_shape),
- ShapeUtil::HumanString(*arg_shape).c_str(), ShapeUtil::Rank(*shape),
- ShapeUtil::HumanString(*shape).c_str());
+ ShapeUtil::Rank(*arg_shape), ShapeUtil::HumanString(*arg_shape),
+ ShapeUtil::Rank(*shape), ShapeUtil::HumanString(*shape));
}
if (!ShapeUtil::SameElementTypeIgnoringFpPrecision(*arg_shape, *shape)) {
return InvalidArgument(
"Cannot concatenate arrays with different element types: %s vs %s.",
- PrimitiveType_Name(arg_shape->element_type()).c_str(),
- PrimitiveType_Name(shape->element_type()).c_str());
+ PrimitiveType_Name(arg_shape->element_type()),
+ PrimitiveType_Name(shape->element_type()));
}
for (int64 dimension_number = 0;
dimension_number < ShapeUtil::Rank(*arg_shape); ++dimension_number) {
@@ -385,9 +377,9 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
return InvalidArgument(
"Cannot concatenate arrays that differ in dimensions other than "
"the one being concatenated (the other array dimensions must be "
- "the same): %s vs %s in dimension %lld.",
- ShapeUtil::HumanString(*arg_shape).c_str(),
- ShapeUtil::HumanString(*shape).c_str(), dimension);
+ "the same): %s vs %s in dimension %d.",
+ ShapeUtil::HumanString(*arg_shape), ShapeUtil::HumanString(*shape),
+ dimension);
}
}
element_type = ShapeUtil::HigherPrecisionElementType(*shape, *arg_shape);
@@ -419,8 +411,8 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
!primitive_util::IsComplexType(new_element_type)) {
return Unimplemented(
"Conversion from complex to real type %s => %s is not implemented.",
- ShapeUtil::HumanString(operand_shape).c_str(),
- PrimitiveType_Name(new_element_type).c_str());
+ ShapeUtil::HumanString(operand_shape),
+ PrimitiveType_Name(new_element_type));
}
if (!ShapeUtil::IsArray(operand_shape) ||
!primitive_util::IsArrayType(new_element_type)) {
@@ -429,8 +421,8 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
// are valid. For now we just reject them, though.
return InvalidArgument(
"Convert does not allow non-arrays, so cannot convert from %s to %s.",
- ShapeUtil::HumanString(operand_shape).c_str(),
- PrimitiveType_Name(new_element_type).c_str());
+ ShapeUtil::HumanString(operand_shape),
+ PrimitiveType_Name(new_element_type));
}
return ShapeUtil::ChangeElementType(operand_shape, new_element_type);
@@ -442,8 +434,8 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
if (primitive_util::IsComplexType(old_element_type) !=
primitive_util::IsComplexType(new_element_type)) {
return InvalidArgument("Conversion from complex to real type %s => %s.",
- ShapeUtil::HumanString(operand_shape).c_str(),
- PrimitiveType_Name(new_element_type).c_str());
+ ShapeUtil::HumanString(operand_shape),
+ PrimitiveType_Name(new_element_type));
}
if (!ShapeUtil::IsArray(operand_shape) ||
!primitive_util::IsArrayType(new_element_type)) {
@@ -452,15 +444,15 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
// are valid. For now we just reject them, though.
return InvalidArgument(
"Cannot convert from or to tuple type; requested conversion: %s => %s.",
- ShapeUtil::HumanString(operand_shape).c_str(),
- PrimitiveType_Name(new_element_type).c_str());
+ ShapeUtil::HumanString(operand_shape),
+ PrimitiveType_Name(new_element_type));
}
if (primitive_util::BitWidth(old_element_type) !=
primitive_util::BitWidth(new_element_type)) {
return InvalidArgument(
"Cannot bitcast types with different bit-widths: %s => %s.",
- PrimitiveType_Name(old_element_type).c_str(),
- PrimitiveType_Name(new_element_type).c_str());
+ PrimitiveType_Name(old_element_type),
+ PrimitiveType_Name(new_element_type));
}
return ShapeUtil::ChangeElementType(operand_shape, new_element_type);
@@ -473,7 +465,7 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
return InvalidArgument(
"Expected element type in shape to be floating point for "
"ReducePrecision operation; got %s.",
- PrimitiveType_Name(operand_shape.element_type()).c_str());
+ PrimitiveType_Name(operand_shape.element_type()));
}
if (exponent_bits < 1) {
// One exponent bit is necessary to distinguish 0 from infinity. Having
@@ -505,8 +497,8 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
return InvalidArgument(
"The rank of the operand and the padding configuration do not match: "
"%s vs %s.",
- ShapeUtil::HumanString(operand_shape).c_str(),
- padding_config.ShortDebugString().c_str());
+ ShapeUtil::HumanString(operand_shape),
+ padding_config.ShortDebugString());
}
if (!ShapeUtil::SameElementTypeIgnoringFpPrecision(operand_shape,
padding_value_shape)) {
@@ -573,7 +565,7 @@ Status ValidateDotDimensionNumbers(
!dims_in_range(ShapeUtil::Rank(rhs), rhs_contracting_dimensions,
rhs_batch_dimensions)) {
return InvalidArgument("A dimension number is out of range in Dot: %s.",
- dimension_numbers.DebugString().c_str());
+ dimension_numbers.DebugString());
}
// Check that dimension numbers are unique.
@@ -591,7 +583,7 @@ Status ValidateDotDimensionNumbers(
if (!dims_unique(lhs_contracting_dimensions, lhs_batch_dimensions) ||
!dims_unique(rhs_contracting_dimensions, rhs_batch_dimensions)) {
return InvalidArgument("A dimension number is not unique in Dot: %s.",
- dimension_numbers.DebugString().c_str());
+ dimension_numbers.DebugString());
}
// Check that the count of non-contracting-non-batch dimensions is in {0, 1}.
@@ -636,14 +628,13 @@ Status ValidateDotDimensionNumbers(
TF_RETURN_IF_ERROR(ExpectArray(rhs, "rhs of dot"));
auto fail = [lhs, rhs](const string& addendum) -> Status {
- string message = tensorflow::strings::Printf(
- "Cannot infer shape for dot operation: %s <dot> %s.",
- ShapeUtil::HumanString(lhs).c_str(),
- ShapeUtil::HumanString(rhs).c_str());
+ string message =
+ StrFormat("Cannot infer shape for dot operation: %s <dot> %s.",
+ ShapeUtil::HumanString(lhs), ShapeUtil::HumanString(rhs));
if (!addendum.empty()) {
message += " " + addendum;
}
- return InvalidArgument("%s", message.c_str());
+ return InvalidArgument("%s", message);
};
// Check if both element types are the same.
@@ -739,9 +730,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
} else {
return InvalidArgument(
"Binary op %s with incompatible shapes: %s and %s.",
- HloOpcodeString(operation).c_str(),
- ShapeUtil::HumanString(lhs).c_str(),
- ShapeUtil::HumanString(rhs).c_str());
+ HloOpcodeString(operation), ShapeUtil::HumanString(lhs),
+ ShapeUtil::HumanString(rhs));
}
}
return ShapeUtil::MakeShape(ShapeUtil::HigherPrecisionElementType(lhs, rhs),
@@ -756,14 +746,14 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
// the user to provide an explicit broadcast dimension in this case.
// See b/25177275 for more details.
return InvalidArgument("Automatic shape inference not supported: %s and %s",
- ShapeUtil::HumanString(smaller_shape).c_str(),
- ShapeUtil::HumanString(larger_shape).c_str());
+ ShapeUtil::HumanString(smaller_shape),
+ ShapeUtil::HumanString(larger_shape));
} else if (broadcast_dimensions.size() != ShapeUtil::Rank(smaller_shape)) {
return InvalidArgument(
"Size of broadcast_dimensions has to match lower-rank operand's "
"rank; "
- " lower-rank operand's rank is %lld, size of broadcast_dimensions is "
- "%zu.",
+ " lower-rank operand's rank is %d, size of broadcast_dimensions is "
+ "%u.",
ShapeUtil::Rank(smaller_shape), broadcast_dimensions.size());
}
@@ -813,12 +803,12 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
int64 dimension_to_match = broadcast_dimensions.at(i);
if (dimension_to_match < 0) {
return InvalidArgument(
- "Broadcast dimension number (%lld) cannot be negative.",
+ "Broadcast dimension number (%d) cannot be negative.",
dimension_to_match);
}
if (dimension_to_match >= larger_shape.dimensions_size()) {
return InvalidArgument(
- "Broadcast dimension number (%lld) too large; higher-rank "
+ "Broadcast dimension number (%d) too large; higher-rank "
"operand has rank %d.",
dimension_to_match, larger_shape.dimensions_size());
}
@@ -830,16 +820,16 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
if (small_dimension_size != large_dimension_size &&
small_dimension_size != 1 && large_dimension_size != 1) {
return InvalidArgument(
- "Broadcast dimension %d mismatch: %lld != %lld; %s and %s.", i,
+ "Broadcast dimension %d mismatch: %d != %d; %s and %s.", i,
small_dimension_size, large_dimension_size,
- ShapeUtil::HumanString(smaller_shape).c_str(),
- ShapeUtil::HumanString(larger_shape).c_str());
+ ShapeUtil::HumanString(smaller_shape),
+ ShapeUtil::HumanString(larger_shape));
}
// Make sure the broadcast dimensions are listed in a strictly increasing
// order.
if (i > 0 && broadcast_dimensions.at(i - 1) >= dimension_to_match) {
return InvalidArgument(
- "Broadcast dimensions order is wrong: %lld comes after %lld.",
+ "Broadcast dimensions order is wrong: %d comes after %d.",
dimension_to_match, broadcast_dimensions.at(i - 1));
}
@@ -858,8 +848,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
if (!ShapeUtil::SameElementTypeIgnoringFpPrecision(lhs, rhs)) {
return InvalidArgument(
"Binary op %s with different element types: %s and %s.",
- HloOpcodeString(operation).c_str(), ShapeUtil::HumanString(lhs).c_str(),
- ShapeUtil::HumanString(rhs).c_str());
+ HloOpcodeString(operation), ShapeUtil::HumanString(lhs),
+ ShapeUtil::HumanString(rhs));
}
if (ShapeUtil::Rank(lhs) == ShapeUtil::Rank(rhs)) {
@@ -909,11 +899,10 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
/* static */ StatusOr<Shape> ShapeInference::InferBinaryOpShape(
HloOpcode opcode, const Shape& lhs, const Shape& rhs,
tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
- VLOG(2) << tensorflow::strings::Printf(
+ VLOG(2) << StrFormat(
"inferring shape for <%s>(%s, %s) with broadcast_dimensions={%s}",
- HloOpcodeString(opcode).c_str(), ShapeUtil::HumanString(lhs).c_str(),
- ShapeUtil::HumanString(rhs).c_str(),
- StrJoin(broadcast_dimensions, ", ").c_str());
+ HloOpcodeString(opcode), ShapeUtil::HumanString(lhs),
+ ShapeUtil::HumanString(rhs), StrJoin(broadcast_dimensions, ", "));
TF_DCHECK_OK(ShapeUtil::ValidateShapeWithOptionalLayout(lhs));
TF_DCHECK_OK(ShapeUtil::ValidateShapeWithOptionalLayout(rhs));
@@ -942,7 +931,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"Expected element type in shape to be floating for complex compose "
"operation; got %s.",
- PrimitiveType_Name(lhs.element_type()).c_str());
+ PrimitiveType_Name(lhs.element_type()));
}
TF_ASSIGN_OR_RETURN(const Shape& shape,
InferElementwiseBinaryOpShape(opcode, lhs, rhs,
@@ -961,7 +950,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"Expected pred or integral type in argument to and/or operation; "
"got %s.",
- PrimitiveType_Name(lhs.element_type()).c_str());
+ PrimitiveType_Name(lhs.element_type()));
}
return InferElementwiseBinaryOpShape(opcode, lhs, rhs,
broadcast_dimensions);
@@ -979,8 +968,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
default:
return Unimplemented(
"Binary op shape inference: %s; lhs: %s; rhs: %s is not implemented.",
- HloOpcodeString(opcode).c_str(), lhs.ShortDebugString().c_str(),
- rhs.ShortDebugString().c_str());
+ HloOpcodeString(opcode), lhs.ShortDebugString(),
+ rhs.ShortDebugString());
}
}
@@ -1003,8 +992,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
case HloOpcode::kTupleSelect:
return InferTupleSelectShape(lhs, rhs, ehs);
default:
- return InvalidArgument("Unknown operation %s.",
- HloOpcodeString(opcode).c_str());
+ return InvalidArgument("Unknown operation %s.", HloOpcodeString(opcode));
}
}
@@ -1043,8 +1031,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"Sort keys and values dimensions must match. "
"Keys shape is: %s\n, Values shape is: %s",
- ShapeUtil::HumanString(*operand_shapes[0]).c_str(),
- ShapeUtil::HumanString(*operand_shapes[1]).c_str());
+ ShapeUtil::HumanString(*operand_shapes[0]),
+ ShapeUtil::HumanString(*operand_shapes[1]));
}
return ShapeUtil::MakeTupleShape(
{*operand_shapes[0], *operand_shapes[1]});
@@ -1052,8 +1040,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument("Unexpected number of operands for sort");
}
default:
- return InvalidArgument("Unknown operation %s.",
- HloOpcodeString(opcode).c_str());
+ return InvalidArgument("Unknown operation %s.", HloOpcodeString(opcode));
}
}
@@ -1091,7 +1078,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"Map operation requires all operands to have the same shape; got: "
"%s.",
- StrJoin(pieces, ", ").c_str());
+ StrJoin(pieces, ", "));
}
// Check that dimensions.size == arg_shape.dimensions_size() (we currently
@@ -1099,7 +1086,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
if (dimensions.size() != arg_shape->dimensions_size()) {
return InvalidArgument(
"Map applied to a subset of dimensions currently not supported: "
- "arg_dimension_size: %d, requested_map_dimensions_size: %zu.",
+ "arg_dimension_size: %d, requested_map_dimensions_size: %u.",
arg_shape->dimensions_size(), dimensions.size());
}
@@ -1108,7 +1095,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
if (dimensions[i] != i) {
return InvalidArgument(
"Map requires monotonically increasing dimension numbers; got: %s.",
- StrJoin(dimensions, ", ").c_str());
+ StrJoin(dimensions, ", "));
}
}
@@ -1116,7 +1103,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
if (arg_shapes.size() != to_apply.parameters_size()) {
return InvalidArgument(
"Map applied function arity must match number of arguments; got: "
- "arity: %d, arguments: %zu.",
+ "arity: %d, arguments: %u.",
to_apply.parameters_size(), arg_shapes.size());
}
@@ -1125,7 +1112,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
if (!ShapeUtil::IsScalar(output_shape)) {
return InvalidArgument(
"Mapped computation's result has to be a scalar; got: %s.",
- ShapeUtil::HumanString(output_shape).c_str());
+ ShapeUtil::HumanString(output_shape));
}
for (int i = 0; i < to_apply.parameters_size(); ++i) {
@@ -1135,7 +1122,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"Mapped computation's parameter has to be a scalar; "
"got parameter %d shape: %s.",
- i, ShapeUtil::HumanString(parameter_shape).c_str());
+ i, ShapeUtil::HumanString(parameter_shape));
}
if (!ShapeUtil::SameElementTypeIgnoringFpPrecision(parameter_shape,
@@ -1143,8 +1130,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"Mapped computation's parameter type has to match argument element "
"type; got parameter %d shape: %s, argument shape: %s.",
- i, ShapeUtil::HumanString(parameter_shape).c_str(),
- ShapeUtil::HumanString(*arg_shape).c_str());
+ i, ShapeUtil::HumanString(parameter_shape),
+ ShapeUtil::HumanString(*arg_shape));
}
}
@@ -1173,35 +1160,35 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"Expected feature_index of batch-norm-training to be "
"smaller than the rank of operand_shape; "
- "got feature_index %lld, and rank %lld.",
+ "got feature_index %d, and rank %d.",
feature_index, ShapeUtil::Rank(operand_shape));
}
if (feature_index < 0) {
return InvalidArgument(
"Expected feature_index of batch-norm-training to "
- "be a non-negative number, got %lld.",
+ "be a non-negative number, got %d.",
feature_index);
}
if (ShapeUtil::Rank(operand_shape) < 1) {
return InvalidArgument(
"Expected the rank of operand to "
- "batch-norm-training to be at least 1; got %lld.",
+ "batch-norm-training to be at least 1; got %d.",
ShapeUtil::Rank(operand_shape));
}
if (ShapeUtil::Rank(offset_shape) != 1) {
return InvalidArgument(
"Offset input of batch-norm-training must have"
- " rank 1, but has rank %lld.",
+ " rank 1, but has rank %d.",
ShapeUtil::Rank(offset_shape));
}
if (ShapeUtil::Rank(scale_shape) != 1) {
return InvalidArgument(
"Scale input of batch-norm-training must have"
- " rank 1, but has rank %lld.",
+ " rank 1, but has rank %d.",
ShapeUtil::Rank(scale_shape));
}
@@ -1209,7 +1196,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"The operand to batch-norm-training must have a floating point "
"element type, but the shape is %s.",
- PrimitiveType_Name(operand_shape.element_type()).c_str());
+ PrimitiveType_Name(operand_shape.element_type()));
}
if (!ShapeUtil::SameElementTypeIgnoringFpPrecision(offset_shape,
@@ -1218,8 +1205,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
"The inputs should have the same element type for batch-norm-training, "
"but the shape of offset factor is %s "
"and the shape of operand is %s.",
- PrimitiveType_Name(offset_shape.element_type()).c_str(),
- PrimitiveType_Name(operand_shape.element_type()).c_str());
+ PrimitiveType_Name(offset_shape.element_type()),
+ PrimitiveType_Name(operand_shape.element_type()));
}
if (!ShapeUtil::SameElementTypeIgnoringFpPrecision(scale_shape,
@@ -1228,8 +1215,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
"The inputs should have the same element type for batch-norm-training, "
"but the shape of scale factor is %s "
"and the shape of operand is %s.",
- PrimitiveType_Name(scale_shape.element_type()).c_str(),
- PrimitiveType_Name(operand_shape.element_type()).c_str());
+ PrimitiveType_Name(scale_shape.element_type()),
+ PrimitiveType_Name(operand_shape.element_type()));
}
const int64 feature_count = operand_shape.dimensions(feature_index);
@@ -1239,16 +1226,16 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
if (ShapeUtil::GetDimension(offset_shape, 0) != feature_count) {
return InvalidArgument(
"The size of offset factor should be the same as feature count,"
- "but the size of offset factor is %lld "
- "and the feature count is %lld.",
+ "but the size of offset factor is %d "
+ "and the feature count is %d.",
ShapeUtil::GetDimension(offset_shape, 0), feature_count);
}
if (ShapeUtil::GetDimension(scale_shape, 0) != feature_count) {
return InvalidArgument(
"The size of scale factor should be the same as feature count,"
- "but the size of scale factor is %lld "
- "and the feature count is %lld.",
+ "but the size of scale factor is %d "
+ "and the feature count is %d.",
ShapeUtil::GetDimension(scale_shape, 0), feature_count);
}
@@ -1283,35 +1270,35 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"Expected feature_index of batch-norm-inference to be "
"smaller than the rank of operand_shape; "
- "got feature_index %lld, and rank %lld.",
+ "got feature_index %d, and rank %d.",
feature_index, ShapeUtil::Rank(operand_shape));
}
if (feature_index < 0) {
return InvalidArgument(
"Expected feature_index of batch-norm-inference to "
- "be a non-negative number, got %lld.",
+ "be a non-negative number, got %d.",
feature_index);
}
if (ShapeUtil::Rank(operand_shape) < 1) {
return InvalidArgument(
"Expected the rank of operand to "
- "batch-norm-inference to be at least 1; got %lld.",
+ "batch-norm-inference to be at least 1; got %d.",
ShapeUtil::Rank(operand_shape));
}
if (ShapeUtil::Rank(offset_shape) != 1) {
return InvalidArgument(
"Offset input of batch-norm-inference must have"
- " rank 1, but has rank %lld.",
+ " rank 1, but has rank %d.",
ShapeUtil::Rank(offset_shape));
}
if (ShapeUtil::Rank(scale_shape) != 1) {
return InvalidArgument(
"Scale input of batch-norm-inference must have"
- " rank 1, but has rank %lld.",
+ " rank 1, but has rank %d.",
ShapeUtil::Rank(scale_shape));
}
@@ -1319,7 +1306,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"The operand to batch-norm-inference must have a floating point "
"element type, but the shape is %s.",
- PrimitiveType_Name(operand_shape.element_type()).c_str());
+ PrimitiveType_Name(operand_shape.element_type()));
}
if (!ShapeUtil::SameElementTypeIgnoringFpPrecision(offset_shape,
@@ -1329,8 +1316,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
"batch-norm-inference, "
"but the shape of offset factor is %s "
"and the shape of operand is %s.",
- PrimitiveType_Name(offset_shape.element_type()).c_str(),
- PrimitiveType_Name(operand_shape.element_type()).c_str());
+ PrimitiveType_Name(offset_shape.element_type()),
+ PrimitiveType_Name(operand_shape.element_type()));
}
if (!ShapeUtil::SameElementTypeIgnoringFpPrecision(scale_shape,
@@ -1340,8 +1327,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
"batch-norm-inference, "
"but the shape of scale factor is %s "
"and the shape of operand is %s.",
- PrimitiveType_Name(scale_shape.element_type()).c_str(),
- PrimitiveType_Name(operand_shape.element_type()).c_str());
+ PrimitiveType_Name(scale_shape.element_type()),
+ PrimitiveType_Name(operand_shape.element_type()));
}
if (!ShapeUtil::SameElementTypeIgnoringFpPrecision(mean_shape,
@@ -1351,8 +1338,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
"batch-norm-inference, "
"but the shape of mean is %s "
"and the shape of operand is %s.",
- PrimitiveType_Name(mean_shape.element_type()).c_str(),
- PrimitiveType_Name(operand_shape.element_type()).c_str());
+ PrimitiveType_Name(mean_shape.element_type()),
+ PrimitiveType_Name(operand_shape.element_type()));
}
if (!ShapeUtil::SameElementTypeIgnoringFpPrecision(variance_shape,
@@ -1362,8 +1349,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
"batch-norm-inference, "
"but the shape of variance is %s "
"and the shape of operand is %s.",
- PrimitiveType_Name(mean_shape.element_type()).c_str(),
- PrimitiveType_Name(variance_shape.element_type()).c_str());
+ PrimitiveType_Name(mean_shape.element_type()),
+ PrimitiveType_Name(variance_shape.element_type()));
}
const int64 feature_count = operand_shape.dimensions(feature_index);
@@ -1373,32 +1360,32 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
if (ShapeUtil::GetDimension(offset_shape, 0) != feature_count) {
return InvalidArgument(
"The size of offset factor should be the same as feature count,"
- "but the size of offset factor is %lld "
- "and the feature count is %lld.",
+ "but the size of offset factor is %d "
+ "and the feature count is %d.",
ShapeUtil::GetDimension(offset_shape, 0), feature_count);
}
if (ShapeUtil::GetDimension(scale_shape, 0) != feature_count) {
return InvalidArgument(
"The size of scale factor should be the same as feature count,"
- "but the size of scale factor is %lld "
- "and the feature count is %lld.",
+ "but the size of scale factor is %d "
+ "and the feature count is %d.",
ShapeUtil::GetDimension(scale_shape, 0), feature_count);
}
if (ShapeUtil::GetDimension(mean_shape, 0) != feature_count) {
return InvalidArgument(
"The size of mean should be the same as feature count,"
- "but the size of mean is %lld "
- "and the feature count is %lld.",
+ "but the size of mean is %d "
+ "and the feature count is %d.",
ShapeUtil::GetDimension(mean_shape, 0), feature_count);
}
if (ShapeUtil::GetDimension(variance_shape, 0) != feature_count) {
return InvalidArgument(
"The size of variance should be the same as feature count,"
- "but the size of variance is %lld "
- "and the feature count is %lld.",
+ "but the size of variance is %d "
+ "and the feature count is %d.",
ShapeUtil::GetDimension(variance_shape, 0), feature_count);
}
@@ -1428,36 +1415,36 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"Expected feature_index of batch-norm-grad to be "
"smaller than the rank of operand_shape; "
- "got feature_index %lld, and rank %lld.",
+ "got feature_index %d, and rank %d.",
feature_index, ShapeUtil::Rank(operand_shape));
}
if (ShapeUtil::Rank(operand_shape) != ShapeUtil::Rank(output_grad_shape)) {
return InvalidArgument(
"Expected operand_shape of batch-norm-grad to have the same rank as"
- " output_grad_shape; got rank(oprand_shape) %lld, and"
- " rank(output_grad_shape) %lld.",
+        " output_grad_shape; got rank(operand_shape) %d, and"
+ " rank(output_grad_shape) %d.",
ShapeUtil::Rank(operand_shape), ShapeUtil::Rank(output_grad_shape));
}
if (ShapeUtil::Rank(mean_shape) != 1) {
return InvalidArgument(
"Mean input of batch-norm-grad must have"
- " rank 1, but has rank %lld.",
+ " rank 1, but has rank %d.",
ShapeUtil::Rank(mean_shape));
}
if (ShapeUtil::Rank(scale_shape) != 1) {
return InvalidArgument(
"Scale input of batch-norm-grad must have"
- " rank 1, but has rank %lld.",
+ " rank 1, but has rank %d.",
ShapeUtil::Rank(scale_shape));
}
if (ShapeUtil::Rank(var_shape) != 1) {
return InvalidArgument(
"Var input of batch-norm-grad must have"
- " rank 1, but has rank %lld.",
+ " rank 1, but has rank %d.",
ShapeUtil::Rank(var_shape));
}
@@ -1465,14 +1452,14 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"The operand to batch-norm-grad must have a floating point "
"element type, but the shape is %s.",
- PrimitiveType_Name(operand_shape.element_type()).c_str());
+ PrimitiveType_Name(operand_shape.element_type()));
}
if (!ShapeUtil::ElementIsFloating(output_grad_shape)) {
return InvalidArgument(
"The output_grad to batch-norm-grad must have a floating point "
"element type, but the shape is %s.",
- PrimitiveType_Name(output_grad_shape.element_type()).c_str());
+ PrimitiveType_Name(output_grad_shape.element_type()));
}
if (!ShapeUtil::SameElementTypeIgnoringFpPrecision(output_grad_shape,
@@ -1481,8 +1468,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
"The inputs should have the same element type for batch-norm-grad, "
"but the element type of output_grad is %s "
"and the element type of operand is %s.",
- PrimitiveType_Name(output_grad_shape.element_type()).c_str(),
- PrimitiveType_Name(operand_shape.element_type()).c_str());
+ PrimitiveType_Name(output_grad_shape.element_type()),
+ PrimitiveType_Name(operand_shape.element_type()));
}
if (!ShapeUtil::SameElementTypeIgnoringFpPrecision(scale_shape,
@@ -1491,8 +1478,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
"The inputs should have the same element type for batch-norm-grad, "
"but the element type of scale factor is %s "
"and the element type of operand is %s.",
- PrimitiveType_Name(scale_shape.element_type()).c_str(),
- PrimitiveType_Name(operand_shape.element_type()).c_str());
+ PrimitiveType_Name(scale_shape.element_type()),
+ PrimitiveType_Name(operand_shape.element_type()));
}
if (!ShapeUtil::SameElementTypeIgnoringFpPrecision(mean_shape,
@@ -1501,8 +1488,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
"The inputs should have the same element type for batch-norm-grad, "
"but the element type of mean is %s "
"and the element type of operand is %s.",
- PrimitiveType_Name(mean_shape.element_type()).c_str(),
- PrimitiveType_Name(operand_shape.element_type()).c_str());
+ PrimitiveType_Name(mean_shape.element_type()),
+ PrimitiveType_Name(operand_shape.element_type()));
}
if (!ShapeUtil::SameElementTypeIgnoringFpPrecision(var_shape,
@@ -1511,8 +1498,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
"The inputs should have the same element type for batch-norm-grad, "
"but the element type of mean is %s "
"and the element type of operand is %s.",
- PrimitiveType_Name(mean_shape.element_type()).c_str(),
- PrimitiveType_Name(operand_shape.element_type()).c_str());
+ PrimitiveType_Name(mean_shape.element_type()),
+ PrimitiveType_Name(operand_shape.element_type()));
}
const int64 feature_count = operand_shape.dimensions(feature_index);
@@ -1523,24 +1510,24 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
if (ShapeUtil::GetDimension(mean_shape, 0) != feature_count) {
return InvalidArgument(
"The size of mean should be the same as feature count,"
- "but the size of offset factor is %lld "
- "and the feature count is %lld.",
+ "but the size of offset factor is %d "
+ "and the feature count is %d.",
ShapeUtil::GetDimension(mean_shape, 0), feature_count);
}
if (ShapeUtil::GetDimension(scale_shape, 0) != feature_count) {
return InvalidArgument(
"The size of scale factor should be the same as feature count,"
- "but the size of scale factor is %lld "
- "and the feature count is %lld.",
+ "but the size of scale factor is %d "
+ "and the feature count is %d.",
ShapeUtil::GetDimension(scale_shape, 0), feature_count);
}
if (ShapeUtil::GetDimension(var_shape, 0) != feature_count) {
return InvalidArgument(
"The size of variance should be the same as feature count,"
- "but the size of variance is %lld "
- "and the feature count is %lld.",
+ "but the size of variance is %d "
+ "and the feature count is %d.",
ShapeUtil::GetDimension(var_shape, 0), feature_count);
}
@@ -1550,8 +1537,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
ShapeUtil::GetDimension(output_grad_shape, i)) {
return InvalidArgument(
"The bounds of operand shape should be the same as output_grad's,"
- "but the bound of operand_shape at dimension %lld is %lld "
- "and the bound of output_grad_shape is %lld.",
+ "but the bound of operand_shape at dimension %d is %d "
+ "and the bound of output_grad_shape is %d.",
i, ShapeUtil::GetDimension(operand_shape, i),
ShapeUtil::GetDimension(output_grad_shape, i));
}
@@ -1570,15 +1557,14 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
if (!ShapeUtil::SameElementTypeIgnoringFpPrecision(lhs, rhs)) {
return InvalidArgument(
"Convolution with different element types: %s and %s.",
- ShapeUtil::HumanString(lhs).c_str(),
- ShapeUtil::HumanString(rhs).c_str());
+ ShapeUtil::HumanString(lhs), ShapeUtil::HumanString(rhs));
}
if (dnums.input_spatial_dimensions_size() !=
dnums.kernel_spatial_dimensions_size()) {
return InvalidArgument(
"Both arguments to convolution must have same number of dimensions.\n"
"Window: %s",
- window.DebugString().c_str());
+ window.DebugString());
}
const int num_spatial_dims = dnums.input_spatial_dimensions_size();
@@ -1586,19 +1572,19 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"Window must have same number of dimensions as dimension numbers.\n"
"Window: %s\nDimension numbers: %s.",
- window.DebugString().c_str(), dnums.DebugString().c_str());
+ window.DebugString(), dnums.DebugString());
}
const int num_dims = num_spatial_dims + 2;
if (ShapeUtil::Rank(lhs) != num_dims) {
return InvalidArgument(
"The LHS argument to a convolution should have rank %d; lhs: %s.",
- num_dims, ShapeUtil::HumanString(lhs).c_str());
+ num_dims, ShapeUtil::HumanString(lhs));
}
if (ShapeUtil::Rank(rhs) != num_dims) {
return InvalidArgument(
"The RHS argument to a convolution should have rank %d; lhs: %s.",
- num_dims, ShapeUtil::HumanString(lhs).c_str());
+ num_dims, ShapeUtil::HumanString(lhs));
}
TF_DCHECK_OK(ShapeUtil::ValidateShapeWithOptionalLayout(lhs));
TF_DCHECK_OK(ShapeUtil::ValidateShapeWithOptionalLayout(rhs));
@@ -1635,26 +1621,26 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
!std::all_of(output_dnums.begin(), output_dnums.end(), in_range)) {
return InvalidArgument(
"A dimension number is out of range in convolution: %s.",
- dnums.DebugString().c_str());
+ dnums.DebugString());
}
if (input_dnums != expected_dnums) {
return InvalidArgument(
"Input dimensions of convolution must contain each dimension exactly "
"once: %s.",
- dnums.DebugString().c_str());
+ dnums.DebugString());
}
if (window_dnums != expected_dnums) {
return InvalidArgument(
"Window dimensions of convolution must contain each dimension exactly "
"once: %s.",
- dnums.DebugString().c_str());
+ dnums.DebugString());
}
if (output_dnums != expected_dnums) {
return InvalidArgument(
"Output dimensions of convolution must contain each dimension exactly "
"once: %s.",
- dnums.DebugString().c_str());
+ dnums.DebugString());
}
std::vector<int64> input_spatial_dims(num_spatial_dims);
@@ -1675,13 +1661,13 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
if (input_features != kernel_input_features * feature_group_count) {
return InvalidArgument(
- "Expected LHS feature dimension (value %lld) to match RHS "
- "input feature dimension * feature_group_count (value %lld); "
+ "Expected LHS feature dimension (value %d) to match RHS "
+ "input feature dimension * feature_group_count (value %d); "
"got <conv>(%s, %s)\n"
"Dimension numbers: {%s}.",
input_features, kernel_input_features * feature_group_count,
- ShapeUtil::HumanString(lhs).c_str(),
- ShapeUtil::HumanString(rhs).c_str(), dnums.DebugString().c_str());
+ ShapeUtil::HumanString(lhs), ShapeUtil::HumanString(rhs),
+ dnums.DebugString());
}
std::vector<int64> window_dims(num_spatial_dims);
for (int i = 0; i < num_spatial_dims; ++i) {
@@ -1693,8 +1679,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
"RHS shape: %s\n\t"
"Window: {%s}\n\t"
"Dimension numbers: {%s}.",
- ShapeUtil::HumanString(rhs).c_str(), window.ShortDebugString().c_str(),
- dnums.ShortDebugString().c_str());
+ ShapeUtil::HumanString(rhs), window.ShortDebugString(),
+ dnums.ShortDebugString());
}
Shape base_shape =
@@ -1720,29 +1706,29 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
const tensorflow::gtl::ArraySlice<int64> fft_length) {
const int64 fft_rank = fft_length.size();
if (fft_rank < 1 || fft_rank > 3) {
- return InvalidArgument("FFT only supports ranks 1-3; got %lld.", fft_rank);
+ return InvalidArgument("FFT only supports ranks 1-3; got %d.", fft_rank);
}
-#define RET_CHECK_RANK(x) \
- if (x.dimensions_size() < fft_rank) { \
- return InvalidArgument( \
- "FFT of rank %lld requires input of at least " \
- "same rank; got input of rank %d", \
- fft_rank, x.dimensions_size()); \
+#define RET_CHECK_RANK(x) \
+ if (x.dimensions_size() < fft_rank) { \
+ return InvalidArgument( \
+ "FFT of rank %d requires input of at least " \
+ "same rank; got input of rank %d", \
+ fft_rank, x.dimensions_size()); \
}
switch (fft_type) {
case FFT:
case IFFT:
if (in.element_type() != C64) {
return InvalidArgument("%s requires C64 input type, found %s.",
- FftType_Name(fft_type).c_str(),
- PrimitiveType_Name(in.element_type()).c_str());
+ FftType_Name(fft_type),
+ PrimitiveType_Name(in.element_type()));
}
RET_CHECK_RANK(in);
return in;
case RFFT: {
if (in.element_type() != F32) {
return InvalidArgument("RFFT requires F32 input type, found %s.",
- PrimitiveType_Name(in.element_type()).c_str());
+ PrimitiveType_Name(in.element_type()));
}
RET_CHECK_RANK(in);
for (int i = 0; i < fft_rank; i++) {
@@ -1750,7 +1736,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
fft_length[i]) {
return InvalidArgument(
"RFFT requires innermost dimensions match fft_length but "
- "dimension %lld is %lld and should be %lld.",
+ "dimension %d is %d and should be %d.",
in.dimensions_size() - fft_rank + i,
in.dimensions(in.dimensions_size() - fft_rank + i),
fft_length[i]);
@@ -1764,7 +1750,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
case IRFFT: {
if (in.element_type() != C64) {
return InvalidArgument("IRFFT requires C64 input type, found %s.",
- PrimitiveType_Name(in.element_type()).c_str());
+ PrimitiveType_Name(in.element_type()));
}
RET_CHECK_RANK(in);
Shape result = ShapeUtil::ComplexComponentShape(in);
@@ -1773,7 +1759,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
fft_length[i]) {
return InvalidArgument(
"IRFFT requires all but one innermost dimensions match "
- "fft_length, but dimension %lld is %lld and should be %lld.",
+ "fft_length, but dimension %d is %d and should be %d.",
in.dimensions_size() - fft_rank + i,
in.dimensions(in.dimensions_size() - fft_rank + i),
fft_length[i]);
@@ -1783,7 +1769,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
fft_length[fft_rank - 1] / 2 + 1) {
return InvalidArgument(
"IRFFT requires innermost dimension matches fft_length/2+1, but "
- "dimension %d is %lld and should be %lld.",
+ "dimension %d is %d and should be %d.",
in.dimensions_size() - 1, in.dimensions(in.dimensions_size() - 1),
fft_length[fft_rank - 1] / 2 + 1);
}
@@ -1819,18 +1805,18 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
TF_RET_CHECK(split_count > 0);
if (split_dimension >= ShapeUtil::Rank(shape) || split_dimension < 0) {
return InvalidArgument(
- "AllToAll split_dimension %lld is out-of-bounds in shape %s.",
- split_dimension, ShapeUtil::HumanString(shape).c_str());
+ "AllToAll split_dimension %d is out-of-bounds in shape %s.",
+ split_dimension, ShapeUtil::HumanString(shape));
}
if (concat_dimension >= ShapeUtil::Rank(shape) || concat_dimension < 0) {
return InvalidArgument(
- "AllToAll concat_dimension %lld is out-of-bounds in shape %s.",
- concat_dimension, ShapeUtil::HumanString(shape).c_str());
+ "AllToAll concat_dimension %d is out-of-bounds in shape %s.",
+ concat_dimension, ShapeUtil::HumanString(shape));
}
if (shape.dimensions(split_dimension) % split_count != 0) {
return InvalidArgument(
- "AllToAll split dimension size %lld must be dividable by split_count "
- "%lld.",
+        "AllToAll split dimension size %d must be divisible by split_count "
+ "%d.",
shape.dimensions(split_dimension), split_count);
}
std::vector<int64> new_dimensions(shape.dimensions().begin(),
@@ -1850,8 +1836,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"HLO all-to-all has operands with different shapes: the 0th "
"operand shape %s, but the %dth operand has shape %s.",
- ShapeUtil::HumanString(*operand_shapes[0]).c_str(), i,
- ShapeUtil::HumanString(*operand_shapes[i]).c_str());
+ ShapeUtil::HumanString(*operand_shapes[0]), i,
+ ShapeUtil::HumanString(*operand_shapes[i]));
}
}
@@ -1880,9 +1866,9 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
if (!ShapeUtil::SameDimensions(*reduced_args[0], *reduced_args[i])) {
return InvalidArgument(
"All reduced tensors must have the sime dimension. Tensor 0 has "
- "shape %s, Tensor %lld has shape %s",
- ShapeUtil::HumanString(*reduced_args[0]).c_str(), i,
- ShapeUtil::HumanString(*reduced_args[i]).c_str());
+ "shape %s, Tensor %d has shape %s",
+ ShapeUtil::HumanString(*reduced_args[0]), i,
+ ShapeUtil::HumanString(*reduced_args[i]));
}
}
@@ -1892,9 +1878,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
const Shape& arg = *reduced_args[0];
for (int64 dimension : dimensions_to_reduce) {
if (dimension >= ShapeUtil::Rank(arg) || dimension < 0) {
- return InvalidArgument(
- "Reducing out-of-bounds dimension %lld in shape %s.", dimension,
- ShapeUtil::HumanString(arg).c_str());
+ return InvalidArgument("Reducing out-of-bounds dimension %d in shape %s.",
+ dimension, ShapeUtil::HumanString(arg));
}
}
@@ -1967,16 +1952,16 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"Select function's first parameter shape currently must "
"match the operand element shape, but got %s vs %s.",
- ShapeUtil::HumanString(select_shape.parameters(0)).c_str(),
- ShapeUtil::HumanString(operand_element_shape).c_str());
+ ShapeUtil::HumanString(select_shape.parameters(0)),
+ ShapeUtil::HumanString(operand_element_shape));
}
if (!ShapeUtil::CompatibleIgnoringFpPrecision(operand_element_shape,
select_shape.parameters(1))) {
return InvalidArgument(
"Select function's second parameter shape currently must "
"match the operand element shape, but got %s vs %s.",
- ShapeUtil::HumanString(select_shape.parameters(1)).c_str(),
- ShapeUtil::HumanString(operand_element_shape).c_str());
+ ShapeUtil::HumanString(select_shape.parameters(1)),
+ ShapeUtil::HumanString(operand_element_shape));
}
// Check if the scatter function has a proper shape as a reduction.
@@ -1994,8 +1979,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"Source shape does not match the shape of window-reduced operand: "
"source(%s), window-reduced operand(%s).",
- ShapeUtil::HumanString(source_shape).c_str(),
- ShapeUtil::HumanString(window_result_shape).c_str());
+ ShapeUtil::HumanString(source_shape),
+ ShapeUtil::HumanString(window_result_shape));
}
return operand_shape;
}
@@ -2008,29 +1993,27 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"%s in slice operation; argument shape: %s; starts: {%s}; limits: "
"{%s}; strides: {%s}.",
- message.c_str(), ShapeUtil::HumanString(arg).c_str(),
- StrJoin(starts, ",").c_str(), StrJoin(limits, ",").c_str(),
- StrJoin(strides, ",").c_str());
+ message, ShapeUtil::HumanString(arg), StrJoin(starts, ","),
+ StrJoin(limits, ","), StrJoin(strides, ","));
};
TF_RETURN_IF_ERROR(ExpectArray(arg, "operand of slice"));
- VLOG(2) << tensorflow::strings::Printf(
- "slicing shape %s starts={%s} limits={%s}",
- ShapeUtil::HumanString(arg).c_str(), StrJoin(starts, ", ").c_str(),
- StrJoin(limits, ", ").c_str());
+ VLOG(2) << StrFormat("slicing shape %s starts={%s} limits={%s}",
+ ShapeUtil::HumanString(arg), StrJoin(starts, ", "),
+ StrJoin(limits, ", "));
if (starts.size() != limits.size()) {
- return error(Printf("slice start and limit sizes differ: %zu vs %zu",
- starts.size(), limits.size()));
+ return error(StrFormat("slice start and limit sizes differ: %u vs %u",
+ starts.size(), limits.size()));
}
if (starts.size() != strides.size()) {
- return error(Printf("slice start and strides sizes differ: %zu vs %zu",
- starts.size(), strides.size()));
+ return error(StrFormat("slice start and strides sizes differ: %u vs %u",
+ starts.size(), strides.size()));
}
if (starts.size() != ShapeUtil::Rank(arg)) {
return InvalidArgument(
- "Slice index count does not match argument rank: %zu vs %lld.",
+ "Slice index count does not match argument rank: %u vs %d.",
starts.size(), ShapeUtil::Rank(arg));
}
@@ -2040,27 +2023,24 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
int64 limit_index = limits[dimension];
int64 stride = strides[dimension];
if (start_index < 0) {
- return InvalidArgument("Negative start index to slice: %lld.",
- start_index);
+ return InvalidArgument("Negative start index to slice: %d.", start_index);
}
if (limit_index > arg.dimensions(dimension)) {
return error(
- Printf("limit index (%lld) must be less than or equal to dimension "
- "size (%lld)",
- limit_index, arg.dimensions(dimension)));
- }
- VLOG(2) << tensorflow::strings::Printf("starts[%lld] = %lld", dimension,
- start_index);
- VLOG(2) << tensorflow::strings::Printf("limits[%lld] = %lld", dimension,
- limit_index);
+ StrFormat("limit index (%d) must be less than or equal to dimension "
+ "size (%d)",
+ limit_index, arg.dimensions(dimension)));
+ }
+ VLOG(2) << StrFormat("starts[%d] = %d", dimension, start_index);
+ VLOG(2) << StrFormat("limits[%d] = %d", dimension, limit_index);
if (start_index > limit_index) {
return error(
- Printf("limit index (%lld) must be greater or equal to "
- "start index (%lld) in slice with positive stride",
- limit_index, start_index));
+ StrFormat("limit index (%d) must be greater or equal to "
+ "start index (%d) in slice with positive stride",
+ limit_index, start_index));
}
if (stride <= 0) {
- return InvalidArgument("Stride (%lld) must be positive.", stride);
+ return InvalidArgument("Stride (%d) must be positive.", stride);
}
sizes.push_back((limit_index - start_index + stride - 1) / stride);
}
@@ -2075,15 +2055,14 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
TF_RETURN_IF_ERROR(
ExpectArray(start_indices_shape, "start indices of dynamic slice"));
- VLOG(2) << tensorflow::strings::Printf(
+ VLOG(2) << StrFormat(
"slicing shape %s at dynamic start_indices %s with slice_sizes={%s}",
- ShapeUtil::HumanString(operand_shape).c_str(),
- ShapeUtil::HumanString(start_indices_shape).c_str(),
- StrJoin(slice_sizes, ", ").c_str());
+ ShapeUtil::HumanString(operand_shape),
+ ShapeUtil::HumanString(start_indices_shape), StrJoin(slice_sizes, ", "));
if (ShapeUtil::Rank(start_indices_shape) != 1) {
return InvalidArgument(
- "Dynamic slice start indices of rank %lld must be rank1.",
+ "Dynamic slice start indices of rank %d must be rank1.",
ShapeUtil::Rank(start_indices_shape));
}
@@ -2095,16 +2074,15 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
const int64 start_num_dims = start_indices_shape.dimensions(0);
if (ShapeUtil::Rank(operand_shape) != start_num_dims) {
return InvalidArgument(
- "Dynamic slice start number of dimensions %lld (%s) must match rank "
- "%lld of slice input (%s).",
- start_num_dims, ShapeUtil::HumanString(start_indices_shape).c_str(),
- ShapeUtil::Rank(operand_shape),
- ShapeUtil::HumanString(operand_shape).c_str());
+ "Dynamic slice start number of dimensions %d (%s) must match rank "
+ "%d of slice input (%s).",
+ start_num_dims, ShapeUtil::HumanString(start_indices_shape),
+ ShapeUtil::Rank(operand_shape), ShapeUtil::HumanString(operand_shape));
}
if (slice_sizes.size() != ShapeUtil::Rank(operand_shape)) {
return InvalidArgument(
- "Dynamic slice index count does not match argument rank: %zu vs %lld.",
+ "Dynamic slice index count does not match argument rank: %u vs %d.",
slice_sizes.size(), ShapeUtil::Rank(operand_shape));
}
@@ -2112,16 +2090,15 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
const int64 input_dim_size = operand_shape.dimensions(dim);
const int64 slice_dim_size = slice_sizes[dim];
if (slice_dim_size < 0) {
- return InvalidArgument("Negative size index to dynamic slice: %lld.",
+ return InvalidArgument("Negative size index to dynamic slice: %d.",
slice_dim_size);
}
if (slice_dim_size > input_dim_size) {
return InvalidArgument(
- "Slice dim size %lld greater than dynamic slice dimension: %lld.",
+ "Slice dim size %d greater than dynamic slice dimension: %d.",
slice_dim_size, input_dim_size);
}
- VLOG(2) << tensorflow::strings::Printf("slice_sizes[%lld] = %lld", dim,
- slice_dim_size);
+ VLOG(2) << StrFormat("slice_sizes[%d] = %d", dim, slice_dim_size);
}
return ShapeUtil::MakeShape(operand_shape.element_type(), slice_sizes);
@@ -2137,16 +2114,16 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
TF_RETURN_IF_ERROR(ExpectArray(start_indices_shape,
"start indices of dynamic update slice"));
- VLOG(2) << tensorflow::strings::Printf(
+ VLOG(2) << StrFormat(
"updating slice of shape %s at dynamic start_indices %s with update "
"shape %s",
- ShapeUtil::HumanString(operand_shape).c_str(),
- ShapeUtil::HumanString(start_indices_shape).c_str(),
- ShapeUtil::HumanString(update_shape).c_str());
+ ShapeUtil::HumanString(operand_shape),
+ ShapeUtil::HumanString(start_indices_shape),
+ ShapeUtil::HumanString(update_shape));
if (ShapeUtil::Rank(start_indices_shape) != 1) {
return InvalidArgument(
- "Dynamic update slice start indices of rank %lld must be rank1.",
+ "Dynamic update slice start indices of rank %d must be rank1.",
ShapeUtil::Rank(start_indices_shape));
}
@@ -2158,17 +2135,16 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
const int64 start_num_dims = start_indices_shape.dimensions(0);
if (ShapeUtil::Rank(operand_shape) != start_num_dims) {
return InvalidArgument(
- "Dynamic update slice start number of dimensions %lld (%s) must match "
- "rank %lld of slice input (%s).",
- start_num_dims, ShapeUtil::HumanString(start_indices_shape).c_str(),
- ShapeUtil::Rank(operand_shape),
- ShapeUtil::HumanString(operand_shape).c_str());
+ "Dynamic update slice start number of dimensions %d (%s) must match "
+ "rank %d of slice input (%s).",
+ start_num_dims, ShapeUtil::HumanString(start_indices_shape),
+ ShapeUtil::Rank(operand_shape), ShapeUtil::HumanString(operand_shape));
}
if (ShapeUtil::Rank(update_shape) != ShapeUtil::Rank(operand_shape)) {
return InvalidArgument(
"Dynamic update slice update rank does not match argument rank: "
- "%lld vs %lld.",
+ "%d vs %d.",
ShapeUtil::Rank(update_shape), ShapeUtil::Rank(operand_shape));
}
@@ -2177,8 +2153,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"Dynamic update slice update element type does not match argument. "
"operand.element_type: %s vs update.element_type: %s.",
- PrimitiveType_Name(operand_shape.element_type()).c_str(),
- PrimitiveType_Name(update_shape.element_type()).c_str());
+ PrimitiveType_Name(operand_shape.element_type()),
+ PrimitiveType_Name(update_shape.element_type()));
}
for (int64 dim = 0; dim < ShapeUtil::Rank(operand_shape); ++dim) {
@@ -2186,16 +2162,15 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
const int64 update_dim_size = update_shape.dimensions(dim);
if (update_dim_size < 0) {
return InvalidArgument(
- "Size index %lld to dynamic update slice must be >= 0.",
+ "Size index %d to dynamic update slice must be >= 0.",
update_dim_size);
}
if (update_dim_size > input_dim_size) {
return InvalidArgument(
- "Update dim size %lld greater than dynamic slice dimension: %lld.",
+ "Update dim size %d greater than dynamic slice dimension: %d.",
update_dim_size, input_dim_size);
}
- VLOG(2) << tensorflow::strings::Printf("update_sizes[%lld] = %lld", dim,
- update_dim_size);
+ VLOG(2) << StrFormat("update_sizes[%d] = %d", dim, update_dim_size);
}
return operand_shape;
@@ -2210,8 +2185,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
for (int64 dimension : dimensions) {
if (dimension >= ShapeUtil::Rank(operand_shape) || dimension < 0) {
return InvalidArgument(
- "One of the reverse dimensions (%lld) is out-of-bounds in shape %s.",
- dimension, ShapeUtil::HumanString(operand_shape).c_str());
+ "One of the reverse dimensions (%d) is out-of-bounds in shape %s.",
+ dimension, ShapeUtil::HumanString(operand_shape));
}
}
return operand_shape;
@@ -2222,14 +2197,14 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
if (!ShapeUtil::IsTuple(arg)) {
return InvalidArgument(
"Cannot infer shape: attempting to index into non-tuple: %s.",
- ShapeUtil::HumanString(arg).c_str());
+ ShapeUtil::HumanString(arg));
}
if (index >= arg.tuple_shapes_size()) {
return InvalidArgument(
- "Cannot infer shape: attempt to index out of tuple bounds: %lld "
+ "Cannot infer shape: attempt to index out of tuple bounds: %d "
">= %d in shape %s.",
- index, arg.tuple_shapes_size(), ShapeUtil::HumanString(arg).c_str());
+ index, arg.tuple_shapes_size(), ShapeUtil::HumanString(arg));
}
return arg.tuple_shapes(index);
@@ -2249,17 +2224,15 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
}
auto shape_string = [&]() {
- return tensorflow::strings::Printf(
- "Condition: %s; body: %s; init: %s.",
- ShapeUtil::HumanString(condition).c_str(),
- ShapeUtil::HumanString(body).c_str(),
- ShapeUtil::HumanString(init).c_str());
+ return StrFormat(
+ "Condition: %s; body: %s; init: %s.", ShapeUtil::HumanString(condition),
+ ShapeUtil::HumanString(body), ShapeUtil::HumanString(init));
};
// Check the shapes of computation parameters and return types.
if (!ShapeUtil::ShapeIs(condition.result(), PRED, {})) {
return InvalidArgument("Condition must return a boolean; got %s.",
- shape_string().c_str());
+ shape_string());
}
if (!ShapeUtil::Compatible(body.result(), condition.parameters(0)) ||
!ShapeUtil::Compatible(body.result(), body.parameters(0)) ||
@@ -2267,7 +2240,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"The parameter of condition and body, the result of the body, and init "
"must all have the same shape; got %s.",
- shape_string().c_str());
+ shape_string());
}
return init;
@@ -2279,7 +2252,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
const ProgramShape& false_computation) {
if (!ShapeUtil::ShapeIs(predicate, PRED, {})) {
return InvalidArgument("Predicate must be a boolean; got %s.",
- ShapeUtil::HumanString(predicate).c_str());
+ ShapeUtil::HumanString(predicate));
}
if (true_computation.parameters_size() != 1) {
@@ -2288,15 +2261,14 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
}
if (!ShapeUtil::Compatible(true_computation.parameters(0), true_operand)) {
auto true_shape_string = [&]() {
- return tensorflow::strings::Printf(
- "true_operand: %s; true_computation: %s",
- ShapeUtil::HumanString(true_operand).c_str(),
- ShapeUtil::HumanString(true_computation).c_str());
+ return StrFormat("true_operand: %s; true_computation: %s",
+ ShapeUtil::HumanString(true_operand),
+ ShapeUtil::HumanString(true_computation));
};
return InvalidArgument(
"true_operand must match the shape of the only parameter of "
"true_computation: got %s.",
- true_shape_string().c_str());
+ true_shape_string());
}
if (false_computation.parameters_size() != 1) {
@@ -2305,28 +2277,27 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
}
if (!ShapeUtil::Compatible(false_computation.parameters(0), false_operand)) {
auto false_shape_string = [&]() {
- return tensorflow::strings::Printf(
- "false_operand: %s; false_computation: %s",
- ShapeUtil::HumanString(false_operand).c_str(),
- ShapeUtil::HumanString(false_computation).c_str());
+ return StrFormat("false_operand: %s; false_computation: %s",
+ ShapeUtil::HumanString(false_operand),
+ ShapeUtil::HumanString(false_computation));
};
return InvalidArgument(
"false_operand must match the shape of the only parameter of "
"false_computation: got %s.",
- false_shape_string().c_str());
+ false_shape_string());
}
if (!ShapeUtil::Compatible(true_computation.result(),
false_computation.result())) {
auto shape_string = [&]() {
- return tensorflow::strings::Printf(
+ return StrFormat(
"true_computation result: %s; false_computation result: %s.",
- ShapeUtil::HumanString(true_computation.result()).c_str(),
- ShapeUtil::HumanString(false_computation.result()).c_str());
+ ShapeUtil::HumanString(true_computation.result()),
+ ShapeUtil::HumanString(false_computation.result()));
};
return InvalidArgument(
"the result of true_computation and false_computation must have the "
"same shape: got %s.",
- shape_string().c_str());
+ shape_string());
}
return true_computation.result();
}
@@ -2336,7 +2307,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
TF_RETURN_IF_ERROR(ExpectArray(operand, "operand of broadcast"));
for (int64 size : broadcast_sizes) {
if (size < 0) {
- return InvalidArgument("Broadcast with negative dimension size %lld.",
+ return InvalidArgument("Broadcast with negative dimension size %d.",
size);
}
}
@@ -2361,11 +2332,11 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
if (ShapeUtil::ElementsIn(operand) != ShapeUtil::ElementsIn(inferred_shape)) {
return InvalidArgument(
- "Reshape operation has mismatched element counts: from=%lld (%s) "
- "to=%lld (%s).",
- ShapeUtil::ElementsIn(operand), ShapeUtil::HumanString(operand).c_str(),
+ "Reshape operation has mismatched element counts: from=%d (%s) "
+ "to=%d (%s).",
+ ShapeUtil::ElementsIn(operand), ShapeUtil::HumanString(operand),
ShapeUtil::ElementsIn(inferred_shape),
- ShapeUtil::HumanString(inferred_shape).c_str());
+ ShapeUtil::HumanString(inferred_shape));
}
std::vector<int64> indices(ShapeUtil::Rank(operand));
@@ -2376,8 +2347,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"Reshape dimensions [%s] are not a permutation of the operand "
"dimensions (operand shape is %s).",
- StrJoin(dimensions, ",").c_str(),
- ShapeUtil::HumanString(operand).c_str());
+ StrJoin(dimensions, ","), ShapeUtil::HumanString(operand));
}
return inferred_shape;
@@ -2412,9 +2382,9 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
if (!ShapeUtil::SameElementTypeIgnoringFpPrecision(min, operand) ||
!ShapeUtil::SameElementTypeIgnoringFpPrecision(max, operand)) {
return InvalidArgument("Clamp with different operand types: %s, %s, %s.",
- ShapeUtil::HumanString(min).c_str(),
- ShapeUtil::HumanString(operand).c_str(),
- ShapeUtil::HumanString(max).c_str());
+ ShapeUtil::HumanString(min),
+ ShapeUtil::HumanString(operand),
+ ShapeUtil::HumanString(max));
}
if (((ShapeUtil::CompatibleIgnoringFpPrecision(min, operand) ||
ShapeUtil::IsScalar(min)) &&
@@ -2431,9 +2401,9 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return ShapeUtil::ChangeElementType(min, operand.element_type());
}
}
- return Unimplemented(
- "%s, %s <clamp> %s is not implemented.", min.ShortDebugString().c_str(),
- max.ShortDebugString().c_str(), operand.ShortDebugString().c_str());
+ return Unimplemented("%s, %s <clamp> %s is not implemented.",
+ min.ShortDebugString(), max.ShortDebugString(),
+ operand.ShortDebugString());
}
// TODO(b/36794510): Make broadcast semantics more consistent, by supporting
@@ -2444,13 +2414,12 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
if (!ShapeUtil::CompatibleIgnoringFpPrecision(on_true, on_false)) {
return InvalidArgument(
"Operands to select must be the same shape; got %s and %s.",
- ShapeUtil::HumanString(on_true).c_str(),
- ShapeUtil::HumanString(on_false).c_str());
+ ShapeUtil::HumanString(on_true), ShapeUtil::HumanString(on_false));
}
if (pred.element_type() != PRED) {
return InvalidArgument(
"Select's pred operand must have PRED element type; got %s.",
- ShapeUtil::HumanString(pred).c_str());
+ ShapeUtil::HumanString(pred));
}
if (ShapeUtil::CompatibleIgnoringElementType(pred, on_true) ||
ShapeUtil::IsScalar(pred)) {
@@ -2463,7 +2432,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"Select operation with non-scalar predicate with dimensionality "
" different from the other operands: %s.",
- ShapeUtil::HumanString(pred).c_str());
+ ShapeUtil::HumanString(pred));
}
}
@@ -2474,18 +2443,17 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
if (!ShapeUtil::Compatible(on_true, on_false)) {
return InvalidArgument(
"Operands to tuple-select must be the same shape; got %s and %s.",
- ShapeUtil::HumanString(on_true).c_str(),
- ShapeUtil::HumanString(on_false).c_str());
+ ShapeUtil::HumanString(on_true), ShapeUtil::HumanString(on_false));
}
if (pred.element_type() != PRED) {
return InvalidArgument(
"TupleSelect's pred operand must have PRED element type; got %s.",
- ShapeUtil::HumanString(pred).c_str());
+ ShapeUtil::HumanString(pred));
}
if (!ShapeUtil::IsScalar(pred)) {
return InvalidArgument(
"TupleSelect operation with non-scalar predicate: %s.",
- ShapeUtil::HumanString(pred).c_str());
+ ShapeUtil::HumanString(pred));
}
return on_true;
}
@@ -2502,10 +2470,10 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
});
return InvalidArgument(
"Call applied function arity must match number of arguments; got: "
- "arity: %d, arguments: %zu; computation signature: %s; argument "
+ "arity: %d, arguments: %u; computation signature: %s; argument "
"shapes: [%s].",
- to_apply.parameters_size(), arg_shapes.size(),
- computation_signature.c_str(), argument_shapes.c_str());
+ to_apply.parameters_size(), arg_shapes.size(), computation_signature,
+ argument_shapes);
}
// All arguments must be compatible with the program shape.
@@ -2516,8 +2484,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"Call parameter must match argument; got parameter %d shape: %s, "
"argument shape: %s.",
- i, ShapeUtil::HumanString(param_shape).c_str(),
- ShapeUtil::HumanString(arg_shape).c_str());
+ i, ShapeUtil::HumanString(param_shape),
+ ShapeUtil::HumanString(arg_shape));
}
}
@@ -2531,14 +2499,14 @@ static Status ValidateGatherDimensionNumbers(
if (!absl::c_is_sorted(dim_numbers.offset_dims())) {
return InvalidArgument(
"Output window dimensions in gather op must be ascending; got: %s.",
- StrJoin(dim_numbers.offset_dims(), ", ").c_str());
+ StrJoin(dim_numbers.offset_dims(), ", "));
}
if (absl::c_adjacent_find(dim_numbers.offset_dims()) !=
dim_numbers.offset_dims().end()) {
return InvalidArgument(
"Output window dimensions in gather op must not repeat; got: %s.",
- StrJoin(dim_numbers.offset_dims(), ", ").c_str());
+ StrJoin(dim_numbers.offset_dims(), ", "));
}
const int64 output_offset_dim_count = dim_numbers.offset_dims_size();
@@ -2549,9 +2517,9 @@ static Status ValidateGatherDimensionNumbers(
int64 offset_dim = dim_numbers.offset_dims(i);
if (offset_dim < 0 || offset_dim >= output_shape_rank) {
return InvalidArgument(
- "Offset dimension %d in gather op is out of bounds; got %lld, but "
+ "Offset dimension %d in gather op is out of bounds; got %d, but "
"should "
- "have been in [0,%lld).",
+ "have been in [0,%d).",
i, offset_dim, output_shape_rank);
}
}
@@ -2560,8 +2528,8 @@ static Status ValidateGatherDimensionNumbers(
start_indices_shape[dim_numbers.index_vector_dim()]) {
return InvalidArgument(
"Gather op has %d elements in start_index_map and the "
- "bound of dimension index_vector_dim=%lld of start_indices is "
- "%lld. These two numbers must be equal.",
+ "bound of dimension index_vector_dim=%d of start_indices is "
+ "%d. These two numbers must be equal.",
dim_numbers.start_index_map_size(), dim_numbers.index_vector_dim(),
start_indices_shape[dim_numbers.index_vector_dim()]);
}
@@ -2571,7 +2539,7 @@ static Status ValidateGatherDimensionNumbers(
if (operand_dim_for_start_index_i < 0 ||
operand_dim_for_start_index_i >= input_shape.dimensions_size()) {
return InvalidArgument(
- "Invalid start_index_map; domain is [0, %d), got: %d->%lld.",
+ "Invalid start_index_map; domain is [0, %d), got: %d->%d.",
input_shape.dimensions_size(), i, operand_dim_for_start_index_i);
}
}
@@ -2587,14 +2555,14 @@ static Status ValidateGatherDimensionNumbers(
return InvalidArgument(
"Repeated dimensions are not allowed in start_index_map; "
"got: %s.",
- StrJoin(dim_numbers.start_index_map(), ", ").c_str());
+ StrJoin(dim_numbers.start_index_map(), ", "));
}
for (int64 collapsed_dim : dim_numbers.collapsed_slice_dims()) {
if (collapsed_dim < 0 || collapsed_dim >= input_shape.dimensions_size()) {
return InvalidArgument(
"Invalid collapsed_slice_dims set in gather op; valid range is [0, "
- "%d), got: %lld.",
+ "%d), got: %d.",
input_shape.dimensions_size(), collapsed_dim);
}
}
@@ -2602,7 +2570,7 @@ static Status ValidateGatherDimensionNumbers(
if (!absl::c_is_sorted(dim_numbers.collapsed_slice_dims())) {
return InvalidArgument(
"collapsed_slice_dims in gather op must be sorted; got: %s",
- StrJoin(dim_numbers.collapsed_slice_dims(), ", ").c_str());
+ StrJoin(dim_numbers.collapsed_slice_dims(), ", "));
}
if (absl::c_adjacent_find(dim_numbers.collapsed_slice_dims()) !=
@@ -2610,7 +2578,7 @@ static Status ValidateGatherDimensionNumbers(
return InvalidArgument(
"Repeated dimensions not allowed in collapsed_slice_dims in gather op; "
"got: %s.",
- StrJoin(dim_numbers.collapsed_slice_dims(), ", ").c_str());
+ StrJoin(dim_numbers.collapsed_slice_dims(), ", "));
}
return Status::OK();
@@ -2628,7 +2596,7 @@ static Status ValidateGatherDimensionNumbers(
if (!ShapeUtil::ElementIsIntegral(start_indices_shape)) {
return InvalidArgument(
"Gather indices parameter must be an integral tensor; got %s.",
- ShapeUtil::HumanString(start_indices_shape).c_str());
+ ShapeUtil::HumanString(start_indices_shape));
}
// We implicitly reshape gather indices of shape P[A,B,C] to P[A,B,C,1] if
@@ -2641,7 +2609,7 @@ static Status ValidateGatherDimensionNumbers(
return InvalidArgument(
"Gather index leaf dimension must be within [0, rank(start_indices) + "
"1). rank(start_indices) is %d and gather index leaf dimension is "
- "%lld.",
+ "%d.",
start_indices_shape.dimensions_size(),
gather_dim_numbers.index_vector_dim());
}
@@ -2672,9 +2640,8 @@ static Status ValidateGatherDimensionNumbers(
"All components of the offset index in a gather op must either be a "
"offset dimension or explicitly collapsed; got len(slice_sizes)=%lu, "
"output_slice_sizes=%s, collapsed_slice_dims=%s.",
- slice_sizes.size(),
- StrJoin(gather_dim_numbers.offset_dims(), ",").c_str(),
- StrJoin(gather_dim_numbers.collapsed_slice_dims(), ",").c_str());
+ slice_sizes.size(), StrJoin(gather_dim_numbers.offset_dims(), ","),
+ StrJoin(gather_dim_numbers.collapsed_slice_dims(), ","));
}
for (int i = 0; i < slice_sizes.size(); i++) {
@@ -2683,7 +2650,7 @@ static Status ValidateGatherDimensionNumbers(
if (slice_size < 0 || slice_size > corresponding_input_size) {
return InvalidArgument(
"Slice size at index %d in gather op is out of range, must be "
- "within [0, %lld), got %lld.",
+ "within [0, %d), got %d.",
i, corresponding_input_size + 1, slice_size);
}
}
@@ -2692,7 +2659,7 @@ static Status ValidateGatherDimensionNumbers(
if (slice_sizes[gather_dim_numbers.collapsed_slice_dims(i)] != 1) {
return InvalidArgument(
"Gather op can only collapse slice dims with bound 1, but bound is "
- "%lld for index %lld at position %d.",
+ "%d for index %d at position %d.",
slice_sizes[gather_dim_numbers.collapsed_slice_dims(i)],
gather_dim_numbers.collapsed_slice_dims(i), i);
}
@@ -2737,20 +2704,20 @@ Status ValidateScatterDimensionNumbers(
if (!absl::c_is_sorted(dim_numbers.update_window_dims())) {
return InvalidArgument(
"update_window_dims in scatter op must be sorted; got: %s.",
- StrJoin(dim_numbers.update_window_dims(), ", ").c_str());
+ StrJoin(dim_numbers.update_window_dims(), ", "));
}
if (absl::c_adjacent_find(dim_numbers.update_window_dims()) !=
dim_numbers.update_window_dims().end()) {
return InvalidArgument(
"update_window_dims in scatter op must not repeat; got: %s.",
- StrJoin(dim_numbers.update_window_dims(), ", ").c_str());
+ StrJoin(dim_numbers.update_window_dims(), ", "));
}
const int64 updates_rank = ShapeUtil::Rank(updates_shape);
for (int64 window_dim : dim_numbers.update_window_dims()) {
if (window_dim < 0 || window_dim >= updates_rank) {
return InvalidArgument(
"Invalid update_window_dims set in scatter op; valid range is [0, "
- "%lld). got: %lld.",
+ "%d). got: %d.",
updates_rank, window_dim);
}
}
@@ -2759,19 +2726,19 @@ Status ValidateScatterDimensionNumbers(
if (!absl::c_is_sorted(dim_numbers.inserted_window_dims())) {
return InvalidArgument(
"inserted_window_dims in scatter op must be sorted; got: %s.",
- StrJoin(dim_numbers.inserted_window_dims(), ", ").c_str());
+ StrJoin(dim_numbers.inserted_window_dims(), ", "));
}
if (absl::c_adjacent_find(dim_numbers.inserted_window_dims()) !=
dim_numbers.inserted_window_dims().end()) {
return InvalidArgument(
"inserted_window_dims in scatter op must not repeat; got: %s.",
- StrJoin(dim_numbers.inserted_window_dims(), ", ").c_str());
+ StrJoin(dim_numbers.inserted_window_dims(), ", "));
}
for (int64 inserted_dim : dim_numbers.inserted_window_dims()) {
if (inserted_dim < 0 || inserted_dim >= operand_shape.dimensions_size()) {
return InvalidArgument(
"Invalid inserted_window_dims set in scatter op; valid range is [0, "
- "%d), got: %lld.",
+ "%d), got: %d.",
operand_shape.dimensions_size(), inserted_dim);
}
}
@@ -2781,7 +2748,7 @@ Status ValidateScatterDimensionNumbers(
scatter_indices_shape[dim_numbers.index_vector_dim()]) {
return InvalidArgument(
"Scatter op has %d elements in scatter_dims_to_operand_dims and the "
- "bound of dimension index_vector_dim=%lld of scatter_indices is %lld. "
+ "bound of dimension index_vector_dim=%d of scatter_indices is %d. "
"These two numbers must be equal.",
dim_numbers.scatter_dims_to_operand_dims_size(),
dim_numbers.index_vector_dim(),
@@ -2794,7 +2761,7 @@ Status ValidateScatterDimensionNumbers(
scatter_dim_to_operand_dim >= operand_shape.dimensions_size()) {
return InvalidArgument(
"Invalid scatter_dims_to_operand_dims mapping; domain is [0, %d), "
- "got: %d->%lld.",
+ "got: %d->%d.",
operand_shape.dimensions_size(), i, scatter_dim_to_operand_dim);
}
}
@@ -2807,7 +2774,7 @@ Status ValidateScatterDimensionNumbers(
return InvalidArgument(
"Repeated dimensions not allowed in scatter_dims_to_operand_dims; "
"got: %s.",
- StrJoin(dim_numbers.scatter_dims_to_operand_dims(), ", ").c_str());
+ StrJoin(dim_numbers.scatter_dims_to_operand_dims(), ", "));
}
return Status::OK();
@@ -2828,7 +2795,7 @@ Status ValidateScatterDimensionNumbers(
if (!ShapeUtil::ElementIsIntegral(scatter_indices_shape)) {
return InvalidArgument(
"Scatter indices parameter must be an integral tensor; got %s.",
- ShapeUtil::HumanString(scatter_indices_shape).c_str());
+ ShapeUtil::HumanString(scatter_indices_shape));
}
if (scatter_indices_shape.dimensions_size() <
@@ -2837,7 +2804,7 @@ Status ValidateScatterDimensionNumbers(
return InvalidArgument(
"Scatter index leaf dimension must be within [0, rank(scatter_indices)"
" + 1). rank(scatter_indices) is %d and scatter index leaf dimension "
- "is %lld.",
+ "is %d.",
scatter_indices_shape.dimensions_size(),
scatter_dim_numbers.index_vector_dim());
}
@@ -2859,7 +2826,7 @@ Status ValidateScatterDimensionNumbers(
int64 expected_updates_rank = expanded_scatter_indices_shape.size() - 1 +
scatter_dim_numbers.update_window_dims_size();
if (ShapeUtil::Rank(updates_shape) != expected_updates_rank) {
- return InvalidArgument("Updates tensor must be of rank %lld; got %lld.",
+ return InvalidArgument("Updates tensor must be of rank %d; got %d.",
expected_updates_rank,
ShapeUtil::Rank(updates_shape));
}
@@ -2885,7 +2852,7 @@ Status ValidateScatterDimensionNumbers(
return InvalidArgument(
"Bounds of the window dimensions of updates must not exceed the "
"bounds of the corresponding dimensions of operand. For dimension "
- "%lld, updates bound is %lld, operand bound is %lld.",
+ "%d, updates bound is %d, operand bound is %d.",
update_window_dim, updates_shape.dimensions(update_window_dim),
max_update_slice_sizes[i]);
}
@@ -2906,8 +2873,8 @@ Status ValidateScatterDimensionNumbers(
return InvalidArgument(
"Bounds of the scatter dimensions of updates must be same as the "
"bounds of the corresponding dimensions of scatter indices. For "
- "scatter dimension %lld, updates bound is %lld, scatter_indices "
- "bound is %lld.",
+ "scatter dimension %d, updates bound is %d, scatter_indices "
+ "bound is %d.",
i, updates_shape.dimensions(i),
expanded_scatter_indices_shape[scatter_dims_seen]);
}
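The shape_inference.cc hunks above all follow one pattern: %lld and %zu conversions become plain %d and %u, tensorflow::strings::Printf calls become absl::StrFormat, and std::string arguments are passed without .c_str(). A minimal standalone sketch of that pattern, assuming only that Abseil's str_format and str_join headers are available; the values and message text are illustrative, not taken from the XLA sources:

    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <vector>

    #include "absl/strings/str_format.h"
    #include "absl/strings/str_join.h"

    int main() {
      int64_t limit_index = 42;                  // would have needed %lld with Printf
      std::vector<int64_t> starts = {0, 1, 2};
      size_t count = starts.size();              // would have needed %zu with Printf

      // StrFormat deduces integer widths, so %d/%u suffice, and %s accepts
      // std::string directly (no .c_str()), as in the hunks above.
      std::string msg = absl::StrFormat(
          "limit index (%d) with %u start indices: {%s}",
          limit_index, count, absl::StrJoin(starts, ","));
      std::cout << msg << "\n";
      return 0;
    }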
diff --git a/tensorflow/compiler/xla/service/shaped_buffer.cc b/tensorflow/compiler/xla/service/shaped_buffer.cc
index 5c12dc37b7..921a984589 100644
--- a/tensorflow/compiler/xla/service/shaped_buffer.cc
+++ b/tensorflow/compiler/xla/service/shaped_buffer.cc
@@ -20,19 +20,17 @@ limitations under the License.
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
+#include "absl/strings/str_format.h"
#include "tensorflow/compiler/xla/layout_util.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/gtl/flatset.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"
namespace xla {
-using ::tensorflow::strings::Appendf;
-
ShapedBuffer::ShapedBuffer(const Shape& on_host_shape,
const Shape& on_device_shape,
const se::Platform* platform, int device_ordinal)
@@ -93,9 +91,9 @@ string ShapedBuffer::ToString() const {
shape_str = ShapeUtil::HumanStringWithLayout(subshape);
}
const se::DeviceMemoryBase& memory = buffer(index);
- Appendf(&s, " %s%p (%lld bytes) : %s\n",
- string(index.size() * 2, ' ').c_str(), memory.opaque(),
- memory.size(), shape_str.c_str());
+ absl::StrAppendFormat(&s, " %s%p (%d bytes) : %s\n",
+ string(index.size() * 2, ' '), memory.opaque(),
+ memory.size(), shape_str);
});
return s;
}
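This hunk swaps tensorflow::strings::Appendf for absl::StrAppendFormat, which appends formatted text in place to an existing string. A hedged sketch of the same append-in-a-loop shape, with made-up entries standing in for the ShapedBuffer's per-index device buffers:

    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <vector>

    #include "absl/strings/str_format.h"

    int main() {
      // Fake (depth, byte-size, shape) records standing in for real buffer state.
      struct Entry { int depth; int64_t bytes; std::string shape; };
      std::vector<Entry> entries = {{0, 64, "f32[4,4]"}, {1, 16, "f32[2,2]"}};

      std::string s = "ShapedBuffer(dump):\n";
      for (const Entry& e : entries) {
        // Appends onto the tail of s; %p prints a pointer, %d an int64 with no
        // length modifier, %s a std::string with no .c_str().
        absl::StrAppendFormat(&s, "  %s%p (%d bytes) : %s\n",
                              std::string(e.depth * 2, ' '),
                              static_cast<const void*>(&e), e.bytes, e.shape);
      }
      std::cout << s;
      return 0;
    }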
diff --git a/tensorflow/compiler/xla/service/source_map_util.cc b/tensorflow/compiler/xla/service/source_map_util.cc
index 8cbaac7b37..dd53c7531b 100644
--- a/tensorflow/compiler/xla/service/source_map_util.cc
+++ b/tensorflow/compiler/xla/service/source_map_util.cc
@@ -15,6 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/source_map_util.h"
+#include "absl/strings/str_format.h"
#include "tensorflow/compiler/xla/util.h"
namespace xla {
@@ -26,11 +27,10 @@ Status InvalidParameterArgumentV(const OpMetadata& op_metadata,
string message;
tensorflow::strings::Appendv(&message, format, args);
if (!op_metadata.source_file().empty()) {
- tensorflow::strings::Appendf(&message, " (%s:%d)",
- op_metadata.source_file().c_str(),
- op_metadata.source_line());
+ absl::StrAppendFormat(&message, " (%s:%d)", op_metadata.source_file(),
+ op_metadata.source_line());
}
- return InvalidArgument("%s", message.c_str());
+ return InvalidArgument("%s", message);
}
} // namespace
diff --git a/tensorflow/compiler/xla/service/source_map_util.h b/tensorflow/compiler/xla/service/source_map_util.h
index 84607cd012..c5a7e17cb4 100644
--- a/tensorflow/compiler/xla/service/source_map_util.h
+++ b/tensorflow/compiler/xla/service/source_map_util.h
@@ -16,6 +16,7 @@ limitations under the License.
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_SOURCE_MAP_UTIL_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_SOURCE_MAP_UTIL_H_
+#include "absl/strings/str_format.h"
#include "tensorflow/compiler/xla/service/executable.h"
#include "tensorflow/compiler/xla/status.h"
#include "tensorflow/core/platform/macros.h"
@@ -24,21 +25,38 @@ namespace xla {
namespace source_map_util {
// Creates an INVALID_ARGUMENT status with the given format string.
+template <typename... Args>
+Status InvalidParameterArgument(const OpMetadata& op_metadata,
+ const absl::FormatSpec<Args...>& format,
+ const Args&... args) {
+ string message = absl::StrFormat(format, args...);
+ if (!op_metadata.source_file().empty()) {
+ absl::StrAppendFormat(&message, " (%s:%d)", op_metadata.source_file(),
+ op_metadata.source_line());
+ }
+ return InvalidArgument("%s", message);
+}
+
+// Creates an INVALID_ARGUMENT status with the given format string.
//
// Also, attempts to extract the OpMetadata for parameter_number on executable
// and append it to the status message for source mapping to user code.
//
// executable may be nullptr, but parameter_number should not be out of bounds
// or a CHECK-failure may occur.
+template <typename... Args>
Status InvalidParameterArgument(Executable* executable, int parameter_number,
- const char* format, ...)
- TF_PRINTF_ATTRIBUTE(3, 4);
-
-// As above, but takes the parameter metadata directly instead of extracting it
-// from the executable.
-Status InvalidParameterArgument(const OpMetadata& op_metadata,
- const char* format, ...)
- TF_PRINTF_ATTRIBUTE(2, 3);
+ const absl::FormatSpec<Args...>& format,
+ const Args&... args) {
+ if (executable != nullptr && executable->has_module()) {
+ const HloModule& module = executable->module();
+ const HloComputation& computation = *module.entry_computation();
+ HloInstruction* param = computation.parameter_instruction(parameter_number);
+ const OpMetadata& metadata = param->metadata();
+ return InvalidParameterArgument(metadata, format, args...);
+ }
+ return InvalidArgument(format, args...);
+}
} // namespace source_map_util
} // namespace xla
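The header change above is the substantive part of this file: the varargs declarations guarded by TF_PRINTF_ATTRIBUTE become variadic templates taking an absl::FormatSpec, so the %-specifiers are checked against the argument types when the format string is a literal, and the formatting now happens inline in the header. A standalone analogue of that signature; MakeError and its arguments are invented for illustration and are not part of the XLA API:

    #include <iostream>
    #include <string>

    #include "absl/strings/str_format.h"

    template <typename... Args>
    std::string MakeError(const std::string& file, int line,
                          const absl::FormatSpec<Args...>& format,
                          const Args&... args) {
      // Same shape as InvalidParameterArgument above: format the message,
      // then append "(file:line)" when source info is present.
      std::string message = absl::StrFormat(format, args...);
      if (!file.empty()) {
        absl::StrAppendFormat(&message, " (%s:%d)", file, line);
      }
      return message;
    }

    int main() {
      // A mismatched specifier (e.g. %d against a string) is rejected when the
      // format string is a literal, so the TF_PRINTF_ATTRIBUTE annotation on
      // the old declarations is no longer needed.
      std::cout << MakeError("model.py", 42, "parameter %d has shape %s",
                             3, std::string("f32[2,2]"))
                << "\n";
      return 0;
    }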
diff --git a/tensorflow/compiler/xla/service/transfer_manager.cc b/tensorflow/compiler/xla/service/transfer_manager.cc
index 0c577ec67a..b8d2d546e5 100644
--- a/tensorflow/compiler/xla/service/transfer_manager.cc
+++ b/tensorflow/compiler/xla/service/transfer_manager.cc
@@ -149,7 +149,7 @@ Status TransferManager::TransferArrayToDeviceAsync(
if (dest.size() < GetByteSizeRequirement(on_device_shape)) {
return FailedPrecondition(
"Allocation on device not large enough for array: "
- "%lld < %lld",
+ "%d < %d",
dest.size(), GetByteSizeRequirement(on_device_shape));
}
ShapedBuffer shaped_buffer(/*on_host_shape=*/literal.shape(), on_device_shape,
@@ -166,12 +166,12 @@ void TransferManager::TransferArrayFromDevice(
auto error = StrCat("Shape ", ShapeUtil::HumanString(shape),
" has a differently shaped representation on-device: ",
ShapeUtil::HumanString(HostShapeToDeviceShape(shape)));
- return done(FailedPrecondition("%s", error.c_str()));
+ return done(FailedPrecondition("%s", error));
}
if (source.size() < GetByteSizeRequirement(shape)) {
return done(
FailedPrecondition("Allocation on device not large enough for array: "
- "%lld < %lld",
+ "%d < %d",
source.size(), GetByteSizeRequirement(shape)));
}
ShapedBuffer shaped_buffer(/*on_host_shape=*/shape, shape,
@@ -203,7 +203,7 @@ void TransferManager::TransferArrayFromDevice(
return NotFound(
"could not find registered transfer manager for platform %s -- check "
"target linkage",
- platform->Name().c_str());
+ platform->Name());
}
if (it->second.manager == nullptr) {
@@ -254,7 +254,7 @@ Status TransferManager::TransferBufferFromDevice(
if (source.size() < size) {
return FailedPrecondition(
"Source allocation on device not large enough for data tranfer: "
- "%lld < %lld",
+ "%d < %d",
source.size(), size);
}
stream->ThenMemcpy(destination, source, size);
@@ -267,7 +267,7 @@ Status TransferManager::TransferBufferToDevice(
if (destination->size() < size) {
return FailedPrecondition(
"Destination allocation on device not large enough for data tranfer: "
- "%lld < %lld",
+ "%d < %d",
destination->size(), size);
}
stream->ThenMemcpy(destination, source, size);
@@ -278,9 +278,8 @@ StatusOr<ScopedShapedBuffer> TransferManager::AllocateScopedShapedBuffer(
const Shape& on_host_shape, DeviceMemoryAllocator* allocator,
int device_ordinal) {
if (!LayoutUtil::HasLayout(on_host_shape)) {
- return InvalidArgument(
- "Shape must have a layout: %s",
- ShapeUtil::HumanStringWithLayout(on_host_shape).c_str());
+ return InvalidArgument("Shape must have a layout: %s",
+ ShapeUtil::HumanStringWithLayout(on_host_shape));
}
TF_RETURN_IF_ERROR(ShapeUtil::ValidateShape(on_host_shape));
const Shape on_device_shape = HostShapeToDeviceShape(on_host_shape);
diff --git a/tensorflow/compiler/xla/service/tuple_points_to_analysis.cc b/tensorflow/compiler/xla/service/tuple_points_to_analysis.cc
index cb07b8d4d3..cf00ca102b 100644
--- a/tensorflow/compiler/xla/service/tuple_points_to_analysis.cc
+++ b/tensorflow/compiler/xla/service/tuple_points_to_analysis.cc
@@ -21,6 +21,7 @@ limitations under the License.
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
+#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "tensorflow/compiler/xla/map_util.h"
#include "tensorflow/compiler/xla/service/hlo_dataflow_analysis.h"
@@ -29,7 +30,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/core/errors.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"
namespace xla {
@@ -462,21 +462,20 @@ Status TuplePointsToAnalysis::VerifyBuffer(const LogicalBuffer& buffer) const {
return FailedPrecondition(
"LogicalBuffer %s is ill-defined: instruction %s does not define a "
"buffer at that index",
- buffer.ToString().c_str(), buffer.instruction()->name().c_str());
+ buffer.ToString(), buffer.instruction()->name());
}
}
if (buffer.id() < 0 ||
buffer.id() >= logical_buffer_analysis_->num_logical_buffers()) {
- return FailedPrecondition(
- "LogicalBuffer %s is ill-defined: invalid id %lld",
- buffer.ToString().c_str(), buffer.id());
+ return FailedPrecondition("LogicalBuffer %s is ill-defined: invalid id %d",
+ buffer.ToString(), buffer.id());
}
if (GetBuffer(buffer.id()).instruction() != buffer.instruction() ||
GetBuffer(buffer.id()).index() != buffer.index()) {
return FailedPrecondition(
"LogicalBuffer %s is ill-defined: buffer with same id differs: %s",
- buffer.ToString().c_str(), GetBuffer(buffer.id()).ToString().c_str());
+ buffer.ToString(), GetBuffer(buffer.id()).ToString());
}
return Status::OK();
@@ -495,7 +494,7 @@ StatusOr<const LogicalBuffer*> TuplePointsToAnalysis::GetBufferDefinedAt(
if (buffers.size() != 1 || buffers[0]->instruction() != instruction) {
return FailedPrecondition(
"instruction %s does not define buffer at index {%s}",
- instruction->name().c_str(), absl::StrJoin(index, ",").c_str());
+ instruction->name(), absl::StrJoin(index, ","));
}
return buffers[0];
}
@@ -556,8 +555,8 @@ PointsToSet& TuplePointsToAnalysis::CreateCopiedPointsToSet(
}
string TuplePointsToAnalysis::ToString() const {
- string output = tensorflow::strings::Printf(
- "TuplePointsToSet for module %s:\n", module_->name().c_str());
+ string output =
+ absl::StrFormat("TuplePointsToSet for module %s:\n", module_->name());
for (const auto* computation : module_->MakeNonfusionComputations()) {
const char* entry =
computation == module_->entry_computation() ? "entry " : "";