path: root/tensorflow/compiler/xla/service/shape_inference.cc
author     Justin Lebar <jlebar@google.com>  2018-08-27 14:50:49 -0700
committer  TensorFlower Gardener <gardener@tensorflow.org>  2018-08-27 14:55:29 -0700
commit     d57f5a82025702d573d478091dc9c385adf53c09 (patch)
tree       e0a9f6b00cd6846cf71a79e02779e1e1f1f9cfc5 /tensorflow/compiler/xla/service/shape_inference.cc
parent     91f33732cb15f51eaf6ec86c82e42a74e351e061 (diff)
[XLA] Switch to absl::StrFormat.
Unlike Printf, StrFormat does not require type-length qualifiers, e.g. %z, %ll. Nor does it require that you call c_str() to print strings. So these are fixed up here as well.

PiperOrigin-RevId: 210435915
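As a minimal sketch of the difference the commit message describes (illustrative only, not code from this change; the shape string and rank value are made-up stand-ins):

#include <cstdint>
#include <iostream>
#include <string>

#include "absl/strings/str_format.h"

int main() {
  const int64_t rank = 4;                // Printf needed "%lld" here.
  const std::string shape = "f32[2,3]";  // Printf needed shape.c_str() for "%s".

  // absl::StrFormat's %d accepts any integral width, and %s accepts
  // std::string or absl::string_view directly; the format string is
  // checked against the argument types at compile time.
  const std::string msg = absl::StrFormat("Shape %s has rank %d.", shape, rank);
  std::cout << msg << "\n";  // Prints: Shape f32[2,3] has rank 4.
  return 0;
}

The equivalent Printf call would have been Printf("Shape %s has rank %lld.", shape.c_str(), rank); dropping those qualifiers and c_str() calls is exactly the mechanical change the hunks below make.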
Diffstat (limited to 'tensorflow/compiler/xla/service/shape_inference.cc')
-rw-r--r--  tensorflow/compiler/xla/service/shape_inference.cc  699
1 file changed, 333 insertions, 366 deletions
diff --git a/tensorflow/compiler/xla/service/shape_inference.cc b/tensorflow/compiler/xla/service/shape_inference.cc
index 6a22f8bef4..ae6a366d25 100644
--- a/tensorflow/compiler/xla/service/shape_inference.cc
+++ b/tensorflow/compiler/xla/service/shape_inference.cc
@@ -23,6 +23,7 @@ limitations under the License.
#include "absl/algorithm/container.h"
#include "absl/strings/str_cat.h"
+#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "tensorflow/compiler/xla/shape_util.h"
@@ -34,15 +35,14 @@ limitations under the License.
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/flatset.h"
#include "tensorflow/core/lib/math/math_util.h"
-#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
namespace xla {
namespace {
+using absl::StrFormat;
using absl::StrJoin;
-using tensorflow::strings::Printf;
// Returns true if no element is present in slice more than once.
bool AllUnique(tensorflow::gtl::ArraySlice<int64> slice) {
@@ -52,8 +52,7 @@ bool AllUnique(tensorflow::gtl::ArraySlice<int64> slice) {
Status ExpectArray(const Shape& shape, absl::string_view op_type) {
if (!ShapeUtil::IsArray(shape)) {
return InvalidArgument("Expected array argument for %s, but got %s.",
- std::string(op_type).c_str(),
- ShapeUtil::HumanString(shape).c_str());
+ std::string(op_type), ShapeUtil::HumanString(shape));
}
return Status::OK();
}
@@ -65,7 +64,7 @@ Status VerifyReducerShape(
int64 inputs) {
if (reducer_shape.parameters_size() != inputs * 2) {
return InvalidArgument(
- "Reduction function must take %lld parameters, but "
+ "Reduction function must take %d parameters, but "
"takes %d parameter(s).",
inputs * 2, reducer_shape.parameters_size());
}
@@ -75,7 +74,7 @@ Status VerifyReducerShape(
if (ShapeUtil::IsArray(accumulator_shape)) {
if (inputs != 1) {
return InvalidArgument(
- "Reduction function must produce a tuple with %lld elements, but "
+ "Reduction function must produce a tuple with %d elements, but "
"produces a scalar",
inputs);
}
@@ -83,8 +82,8 @@ Status VerifyReducerShape(
} else if (ShapeUtil::IsTuple(accumulator_shape)) {
if (ShapeUtil::TupleElementCount(accumulator_shape) != inputs) {
return InvalidArgument(
- "Reduction function must produce a tuple with %lld elements, but has "
- "%lld elements",
+ "Reduction function must produce a tuple with %d elements, but has "
+ "%d elements",
inputs, ShapeUtil::TupleElementCount(accumulator_shape));
}
for (const Shape& element_shape : accumulator_shape.tuple_shapes()) {
@@ -94,7 +93,7 @@ Status VerifyReducerShape(
return InvalidArgument(
"Reduction function must produce a scalar or tuple of scalars, but has "
"shape: %s",
- ShapeUtil::HumanString(accumulator_shape).c_str());
+ ShapeUtil::HumanString(accumulator_shape));
}
for (const Shape* element_shape : accumulator_subshapes) {
@@ -102,7 +101,7 @@ Status VerifyReducerShape(
return InvalidArgument(
"Reduction function must return a scalar or tuple of scalars but "
"returns shape: %s",
- ShapeUtil::HumanString(accumulator_shape).c_str());
+ ShapeUtil::HumanString(accumulator_shape));
}
}
@@ -113,19 +112,19 @@ Status VerifyReducerShape(
if (!ShapeUtil::Compatible(*accumulator_subshapes[i],
reducer_shape.parameters(i))) {
return InvalidArgument(
- "Reduction function's %lld-th parameter shape differs from the "
+ "Reduction function's %d-th parameter shape differs from the "
"result shape: %s vs %s",
- i, ShapeUtil::HumanString(reducer_shape.parameters(i)).c_str(),
- ShapeUtil::HumanString(*accumulator_subshapes[i]).c_str());
+ i, ShapeUtil::HumanString(reducer_shape.parameters(i)),
+ ShapeUtil::HumanString(*accumulator_subshapes[i]));
}
// Check that init_value's shapes are suitable for reducer_shape.
if (!ShapeUtil::CompatibleIgnoringFpPrecision(*accumulator_subshapes[i],
*init_value_shapes[i])) {
return InvalidArgument(
- "Reduction function's accumulator shape at index %lld differs from "
+ "Reduction function's accumulator shape at index %d differs from "
"the init_value shape: %s vs %s",
- i, ShapeUtil::HumanString(*accumulator_subshapes[i]).c_str(),
- ShapeUtil::HumanString(*init_value_shapes[i]).c_str());
+ i, ShapeUtil::HumanString(*accumulator_subshapes[i]),
+ ShapeUtil::HumanString(*init_value_shapes[i]));
}
// Check that the inputs can be passed in as the non-accumulator arguments.
const Shape input_element_shape =
@@ -133,11 +132,11 @@ Status VerifyReducerShape(
if (!ShapeUtil::CompatibleIgnoringFpPrecision(
input_element_shape, reducer_shape.parameters(inputs + i))) {
return InvalidArgument(
- "Reduction function's %lld-th parameter shape differs from the "
+ "Reduction function's %d-th parameter shape differs from the "
"input type element type: %s vs %s",
inputs + i,
- ShapeUtil::HumanString(reducer_shape.parameters(inputs + i)).c_str(),
- ShapeUtil::HumanString(input_element_shape).c_str());
+ ShapeUtil::HumanString(reducer_shape.parameters(inputs + i)),
+ ShapeUtil::HumanString(input_element_shape));
}
// Check that the accumulator and inputs to the reducer function match.
// If the accumulator is scalar, it must have the same type as the inputs
@@ -147,11 +146,11 @@ Status VerifyReducerShape(
if (!ShapeUtil::CompatibleIgnoringFpPrecision(
*accumulator_subshapes[i], reducer_shape.parameters(inputs + i))) {
return InvalidArgument(
- "Reduction function's %lld-th parameter shape must "
+ "Reduction function's %d-th parameter shape must "
"match the result shape, but got %s vs %s.",
inputs + i,
- ShapeUtil::HumanString(reducer_shape.parameters(inputs + i)).c_str(),
- ShapeUtil::HumanString(*accumulator_subshapes[i]).c_str());
+ ShapeUtil::HumanString(reducer_shape.parameters(inputs + i)),
+ ShapeUtil::HumanString(*accumulator_subshapes[i]));
}
}
@@ -164,7 +163,7 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
bool allow_negative_padding) {
if (window.dimensions_size() != ShapeUtil::Rank(base_shape)) {
return InvalidArgument(
- "Window has dimension %d but base shape has dimension %lld.",
+ "Window has dimension %d but base shape has dimension %d.",
window.dimensions_size(), ShapeUtil::Rank(base_shape));
}
@@ -173,29 +172,29 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
const auto& dim = window.dimensions(i);
if (dim.size() <= 0) {
return InvalidArgument("Window %s has a non-positive dimension.",
- window.DebugString().c_str());
+ window.DebugString());
}
if (dim.stride() <= 0) {
return InvalidArgument("Window %s has a non-positive stride.",
- window.DebugString().c_str());
+ window.DebugString());
}
if (!allow_negative_padding && dim.padding_low() < 0) {
return InvalidArgument("Window %s has a negative low padding.",
- window.DebugString().c_str());
+ window.DebugString());
}
if (!allow_negative_padding && dim.padding_high() < 0) {
return InvalidArgument("Window %s has a negative high padding.",
- window.DebugString().c_str());
+ window.DebugString());
}
if (dim.base_dilation() < 1) {
return InvalidArgument(
"Window %s has a non-positive base area dilation factor.",
- window.DebugString().c_str());
+ window.DebugString());
}
if (dim.window_dilation() < 1) {
return InvalidArgument(
"Window %s has a non-positive window dilation factor.",
- window.DebugString().c_str());
+ window.DebugString());
}
const int64 dilated_base = window_util::DilatedBound(
@@ -238,8 +237,7 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
return InvalidArgument(
"Expected element type in shape to be floating for %s operation; "
"got %s.",
- HloOpcodeString(opcode).c_str(),
- PrimitiveType_Name(shape.element_type()).c_str());
+ HloOpcodeString(opcode), PrimitiveType_Name(shape.element_type()));
}
return shape;
case HloOpcode::kCos:
@@ -254,8 +252,7 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
return InvalidArgument(
"Expected element type in shape to be floating or complex for %s "
"operation; got %s.",
- HloOpcodeString(opcode).c_str(),
- PrimitiveType_Name(shape.element_type()).c_str());
+ HloOpcodeString(opcode), PrimitiveType_Name(shape.element_type()));
}
return shape;
case HloOpcode::kReal:
@@ -268,8 +265,7 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
return InvalidArgument(
"Expected element type in shape to be floating or complex for "
"%s operation; got %s.",
- HloOpcodeString(opcode).c_str(),
- PrimitiveType_Name(shape.element_type()).c_str());
+ HloOpcodeString(opcode), PrimitiveType_Name(shape.element_type()));
}
case HloOpcode::kAbs:
if (ShapeUtil::ElementIsComplex(shape)) {
@@ -281,15 +277,14 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
return InvalidArgument(
"Expected element type in shape to be floating or complex for "
"%s operation; got %s.",
- HloOpcodeString(opcode).c_str(),
- PrimitiveType_Name(shape.element_type()).c_str());
+ HloOpcodeString(opcode), PrimitiveType_Name(shape.element_type()));
}
case HloOpcode::kClz:
if (!ShapeUtil::ElementIsIntegral(shape)) {
return InvalidArgument(
"Expected an integral element type in argument to Clz "
"operation; got %s.",
- PrimitiveType_Name(shape.element_type()).c_str());
+ PrimitiveType_Name(shape.element_type()));
}
return shape;
case HloOpcode::kNegate:
@@ -299,8 +294,7 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
return InvalidArgument(
"Expected element type in shape to be integral, floating or "
"complex for %s operation; got %s.",
- HloOpcodeString(opcode).c_str(),
- PrimitiveType_Name(shape.element_type()).c_str());
+ HloOpcodeString(opcode), PrimitiveType_Name(shape.element_type()));
}
return shape;
case HloOpcode::kSign:
@@ -309,8 +303,7 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
return InvalidArgument(
"Expected element type in shape to be signed or complex for "
"%s operation; got %s.",
- HloOpcodeString(opcode).c_str(),
- PrimitiveType_Name(shape.element_type()).c_str());
+ HloOpcodeString(opcode), PrimitiveType_Name(shape.element_type()));
}
return shape;
@@ -320,7 +313,7 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
return InvalidArgument(
"Expected pred or an integral element type in argument to Not "
"operation; got %s.",
- PrimitiveType_Name(shape.element_type()).c_str());
+ PrimitiveType_Name(shape.element_type()));
}
return shape;
@@ -330,14 +323,14 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
"Expected element type in shape to be floating "
"point for IsFinite "
"operation; got %s.",
- PrimitiveType_Name(shape.element_type()).c_str());
+ PrimitiveType_Name(shape.element_type()));
}
return ShapeUtil::ChangeElementType(shape, PRED);
default:
return InvalidArgument(
"Unknown operation for unary shape inference: \"%s\".",
- HloOpcodeString(opcode).c_str());
+ HloOpcodeString(opcode));
}
}
@@ -348,7 +341,7 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
return InvalidArgument("Concatenate expects at least one argument.");
}
if (dimension < 0 || dimension >= ShapeUtil::Rank(*arg_shapes[0])) {
- return InvalidArgument("Concatenate dimension out of bounds: %lld.",
+ return InvalidArgument("Concatenate dimension out of bounds: %d.",
dimension);
}
const Shape* arg_shape = nullptr;
@@ -362,17 +355,16 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
}
if (ShapeUtil::Rank(*arg_shape) != ShapeUtil::Rank(*shape)) {
return InvalidArgument(
- "Cannot concatenate arrays with different ranks: %lld (%s) vs %lld "
+ "Cannot concatenate arrays with different ranks: %d (%s) vs %d "
"(%s).",
- ShapeUtil::Rank(*arg_shape),
- ShapeUtil::HumanString(*arg_shape).c_str(), ShapeUtil::Rank(*shape),
- ShapeUtil::HumanString(*shape).c_str());
+ ShapeUtil::Rank(*arg_shape), ShapeUtil::HumanString(*arg_shape),
+ ShapeUtil::Rank(*shape), ShapeUtil::HumanString(*shape));
}
if (!ShapeUtil::SameElementTypeIgnoringFpPrecision(*arg_shape, *shape)) {
return InvalidArgument(
"Cannot concatenate arrays with different element types: %s vs %s.",
- PrimitiveType_Name(arg_shape->element_type()).c_str(),
- PrimitiveType_Name(shape->element_type()).c_str());
+ PrimitiveType_Name(arg_shape->element_type()),
+ PrimitiveType_Name(shape->element_type()));
}
for (int64 dimension_number = 0;
dimension_number < ShapeUtil::Rank(*arg_shape); ++dimension_number) {
@@ -385,9 +377,9 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
return InvalidArgument(
"Cannot concatenate arrays that differ in dimensions other than "
"the one being concatenated (the other array dimensions must be "
- "the same): %s vs %s in dimension %lld.",
- ShapeUtil::HumanString(*arg_shape).c_str(),
- ShapeUtil::HumanString(*shape).c_str(), dimension);
+ "the same): %s vs %s in dimension %d.",
+ ShapeUtil::HumanString(*arg_shape), ShapeUtil::HumanString(*shape),
+ dimension);
}
}
element_type = ShapeUtil::HigherPrecisionElementType(*shape, *arg_shape);
@@ -419,8 +411,8 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
!primitive_util::IsComplexType(new_element_type)) {
return Unimplemented(
"Conversion from complex to real type %s => %s is not implemented.",
- ShapeUtil::HumanString(operand_shape).c_str(),
- PrimitiveType_Name(new_element_type).c_str());
+ ShapeUtil::HumanString(operand_shape),
+ PrimitiveType_Name(new_element_type));
}
if (!ShapeUtil::IsArray(operand_shape) ||
!primitive_util::IsArrayType(new_element_type)) {
@@ -429,8 +421,8 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
// are valid. For now we just reject them, though.
return InvalidArgument(
"Convert does not allow non-arrays, so cannot convert from %s to %s.",
- ShapeUtil::HumanString(operand_shape).c_str(),
- PrimitiveType_Name(new_element_type).c_str());
+ ShapeUtil::HumanString(operand_shape),
+ PrimitiveType_Name(new_element_type));
}
return ShapeUtil::ChangeElementType(operand_shape, new_element_type);
@@ -442,8 +434,8 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
if (primitive_util::IsComplexType(old_element_type) !=
primitive_util::IsComplexType(new_element_type)) {
return InvalidArgument("Conversion from complex to real type %s => %s.",
- ShapeUtil::HumanString(operand_shape).c_str(),
- PrimitiveType_Name(new_element_type).c_str());
+ ShapeUtil::HumanString(operand_shape),
+ PrimitiveType_Name(new_element_type));
}
if (!ShapeUtil::IsArray(operand_shape) ||
!primitive_util::IsArrayType(new_element_type)) {
@@ -452,15 +444,15 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
// are valid. For now we just reject them, though.
return InvalidArgument(
"Cannot convert from or to tuple type; requested conversion: %s => %s.",
- ShapeUtil::HumanString(operand_shape).c_str(),
- PrimitiveType_Name(new_element_type).c_str());
+ ShapeUtil::HumanString(operand_shape),
+ PrimitiveType_Name(new_element_type));
}
if (primitive_util::BitWidth(old_element_type) !=
primitive_util::BitWidth(new_element_type)) {
return InvalidArgument(
"Cannot bitcast types with different bit-widths: %s => %s.",
- PrimitiveType_Name(old_element_type).c_str(),
- PrimitiveType_Name(new_element_type).c_str());
+ PrimitiveType_Name(old_element_type),
+ PrimitiveType_Name(new_element_type));
}
return ShapeUtil::ChangeElementType(operand_shape, new_element_type);
@@ -473,7 +465,7 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
return InvalidArgument(
"Expected element type in shape to be floating point for "
"ReducePrecision operation; got %s.",
- PrimitiveType_Name(operand_shape.element_type()).c_str());
+ PrimitiveType_Name(operand_shape.element_type()));
}
if (exponent_bits < 1) {
// One exponent bit is necessary to distinguish 0 from infinity. Having
@@ -505,8 +497,8 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
return InvalidArgument(
"The rank of the operand and the padding configuration do not match: "
"%s vs %s.",
- ShapeUtil::HumanString(operand_shape).c_str(),
- padding_config.ShortDebugString().c_str());
+ ShapeUtil::HumanString(operand_shape),
+ padding_config.ShortDebugString());
}
if (!ShapeUtil::SameElementTypeIgnoringFpPrecision(operand_shape,
padding_value_shape)) {
@@ -573,7 +565,7 @@ Status ValidateDotDimensionNumbers(
!dims_in_range(ShapeUtil::Rank(rhs), rhs_contracting_dimensions,
rhs_batch_dimensions)) {
return InvalidArgument("A dimension number is out of range in Dot: %s.",
- dimension_numbers.DebugString().c_str());
+ dimension_numbers.DebugString());
}
// Check that dimension numbers are unique.
@@ -591,7 +583,7 @@ Status ValidateDotDimensionNumbers(
if (!dims_unique(lhs_contracting_dimensions, lhs_batch_dimensions) ||
!dims_unique(rhs_contracting_dimensions, rhs_batch_dimensions)) {
return InvalidArgument("A dimension number is not unique in Dot: %s.",
- dimension_numbers.DebugString().c_str());
+ dimension_numbers.DebugString());
}
// Check that the count of non-contracting-non-batch dimensions is in {0, 1}.
@@ -636,14 +628,13 @@ Status ValidateDotDimensionNumbers(
TF_RETURN_IF_ERROR(ExpectArray(rhs, "rhs of dot"));
auto fail = [lhs, rhs](const string& addendum) -> Status {
- string message = tensorflow::strings::Printf(
- "Cannot infer shape for dot operation: %s <dot> %s.",
- ShapeUtil::HumanString(lhs).c_str(),
- ShapeUtil::HumanString(rhs).c_str());
+ string message =
+ StrFormat("Cannot infer shape for dot operation: %s <dot> %s.",
+ ShapeUtil::HumanString(lhs), ShapeUtil::HumanString(rhs));
if (!addendum.empty()) {
message += " " + addendum;
}
- return InvalidArgument("%s", message.c_str());
+ return InvalidArgument("%s", message);
};
// Check if both element types are the same.
@@ -739,9 +730,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
} else {
return InvalidArgument(
"Binary op %s with incompatible shapes: %s and %s.",
- HloOpcodeString(operation).c_str(),
- ShapeUtil::HumanString(lhs).c_str(),
- ShapeUtil::HumanString(rhs).c_str());
+ HloOpcodeString(operation), ShapeUtil::HumanString(lhs),
+ ShapeUtil::HumanString(rhs));
}
}
return ShapeUtil::MakeShape(ShapeUtil::HigherPrecisionElementType(lhs, rhs),
@@ -756,14 +746,14 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
// the user to provide an explicit broadcast dimension in this case.
// See b/25177275 for more details.
return InvalidArgument("Automatic shape inference not supported: %s and %s",
- ShapeUtil::HumanString(smaller_shape).c_str(),
- ShapeUtil::HumanString(larger_shape).c_str());
+ ShapeUtil::HumanString(smaller_shape),
+ ShapeUtil::HumanString(larger_shape));
} else if (broadcast_dimensions.size() != ShapeUtil::Rank(smaller_shape)) {
return InvalidArgument(
"Size of broadcast_dimensions has to match lower-rank operand's "
"rank; "
- " lower-rank operand's rank is %lld, size of broadcast_dimensions is "
- "%zu.",
+ " lower-rank operand's rank is %d, size of broadcast_dimensions is "
+ "%u.",
ShapeUtil::Rank(smaller_shape), broadcast_dimensions.size());
}
@@ -813,12 +803,12 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
int64 dimension_to_match = broadcast_dimensions.at(i);
if (dimension_to_match < 0) {
return InvalidArgument(
- "Broadcast dimension number (%lld) cannot be negative.",
+ "Broadcast dimension number (%d) cannot be negative.",
dimension_to_match);
}
if (dimension_to_match >= larger_shape.dimensions_size()) {
return InvalidArgument(
- "Broadcast dimension number (%lld) too large; higher-rank "
+ "Broadcast dimension number (%d) too large; higher-rank "
"operand has rank %d.",
dimension_to_match, larger_shape.dimensions_size());
}
@@ -830,16 +820,16 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
if (small_dimension_size != large_dimension_size &&
small_dimension_size != 1 && large_dimension_size != 1) {
return InvalidArgument(
- "Broadcast dimension %d mismatch: %lld != %lld; %s and %s.", i,
+ "Broadcast dimension %d mismatch: %d != %d; %s and %s.", i,
small_dimension_size, large_dimension_size,
- ShapeUtil::HumanString(smaller_shape).c_str(),
- ShapeUtil::HumanString(larger_shape).c_str());
+ ShapeUtil::HumanString(smaller_shape),
+ ShapeUtil::HumanString(larger_shape));
}
// Make sure the broadcast dimensions are listed in a strictly increasing
// order.
if (i > 0 && broadcast_dimensions.at(i - 1) >= dimension_to_match) {
return InvalidArgument(
- "Broadcast dimensions order is wrong: %lld comes after %lld.",
+ "Broadcast dimensions order is wrong: %d comes after %d.",
dimension_to_match, broadcast_dimensions.at(i - 1));
}
@@ -858,8 +848,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
if (!ShapeUtil::SameElementTypeIgnoringFpPrecision(lhs, rhs)) {
return InvalidArgument(
"Binary op %s with different element types: %s and %s.",
- HloOpcodeString(operation).c_str(), ShapeUtil::HumanString(lhs).c_str(),
- ShapeUtil::HumanString(rhs).c_str());
+ HloOpcodeString(operation), ShapeUtil::HumanString(lhs),
+ ShapeUtil::HumanString(rhs));
}
if (ShapeUtil::Rank(lhs) == ShapeUtil::Rank(rhs)) {
@@ -909,11 +899,10 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
/* static */ StatusOr<Shape> ShapeInference::InferBinaryOpShape(
HloOpcode opcode, const Shape& lhs, const Shape& rhs,
tensorflow::gtl::ArraySlice<int64> broadcast_dimensions) {
- VLOG(2) << tensorflow::strings::Printf(
+ VLOG(2) << StrFormat(
"inferring shape for <%s>(%s, %s) with broadcast_dimensions={%s}",
- HloOpcodeString(opcode).c_str(), ShapeUtil::HumanString(lhs).c_str(),
- ShapeUtil::HumanString(rhs).c_str(),
- StrJoin(broadcast_dimensions, ", ").c_str());
+ HloOpcodeString(opcode), ShapeUtil::HumanString(lhs),
+ ShapeUtil::HumanString(rhs), StrJoin(broadcast_dimensions, ", "));
TF_DCHECK_OK(ShapeUtil::ValidateShapeWithOptionalLayout(lhs));
TF_DCHECK_OK(ShapeUtil::ValidateShapeWithOptionalLayout(rhs));
@@ -942,7 +931,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"Expected element type in shape to be floating for complex compose "
"operation; got %s.",
- PrimitiveType_Name(lhs.element_type()).c_str());
+ PrimitiveType_Name(lhs.element_type()));
}
TF_ASSIGN_OR_RETURN(const Shape& shape,
InferElementwiseBinaryOpShape(opcode, lhs, rhs,
@@ -961,7 +950,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"Expected pred or integral type in argument to and/or operation; "
"got %s.",
- PrimitiveType_Name(lhs.element_type()).c_str());
+ PrimitiveType_Name(lhs.element_type()));
}
return InferElementwiseBinaryOpShape(opcode, lhs, rhs,
broadcast_dimensions);
@@ -979,8 +968,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
default:
return Unimplemented(
"Binary op shape inference: %s; lhs: %s; rhs: %s is not implemented.",
- HloOpcodeString(opcode).c_str(), lhs.ShortDebugString().c_str(),
- rhs.ShortDebugString().c_str());
+ HloOpcodeString(opcode), lhs.ShortDebugString(),
+ rhs.ShortDebugString());
}
}
@@ -1003,8 +992,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
case HloOpcode::kTupleSelect:
return InferTupleSelectShape(lhs, rhs, ehs);
default:
- return InvalidArgument("Unknown operation %s.",
- HloOpcodeString(opcode).c_str());
+ return InvalidArgument("Unknown operation %s.", HloOpcodeString(opcode));
}
}
@@ -1043,8 +1031,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"Sort keys and values dimensions must match. "
"Keys shape is: %s\n, Values shape is: %s",
- ShapeUtil::HumanString(*operand_shapes[0]).c_str(),
- ShapeUtil::HumanString(*operand_shapes[1]).c_str());
+ ShapeUtil::HumanString(*operand_shapes[0]),
+ ShapeUtil::HumanString(*operand_shapes[1]));
}
return ShapeUtil::MakeTupleShape(
{*operand_shapes[0], *operand_shapes[1]});
@@ -1052,8 +1040,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument("Unexpected number of operands for sort");
}
default:
- return InvalidArgument("Unknown operation %s.",
- HloOpcodeString(opcode).c_str());
+ return InvalidArgument("Unknown operation %s.", HloOpcodeString(opcode));
}
}
@@ -1091,7 +1078,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"Map operation requires all operands to have the same shape; got: "
"%s.",
- StrJoin(pieces, ", ").c_str());
+ StrJoin(pieces, ", "));
}
// Check that dimensions.size == arg_shape.dimensions_size() (we currently
@@ -1099,7 +1086,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
if (dimensions.size() != arg_shape->dimensions_size()) {
return InvalidArgument(
"Map applied to a subset of dimensions currently not supported: "
- "arg_dimension_size: %d, requested_map_dimensions_size: %zu.",
+ "arg_dimension_size: %d, requested_map_dimensions_size: %u.",
arg_shape->dimensions_size(), dimensions.size());
}
@@ -1108,7 +1095,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
if (dimensions[i] != i) {
return InvalidArgument(
"Map requires monotonically increasing dimension numbers; got: %s.",
- StrJoin(dimensions, ", ").c_str());
+ StrJoin(dimensions, ", "));
}
}
@@ -1116,7 +1103,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
if (arg_shapes.size() != to_apply.parameters_size()) {
return InvalidArgument(
"Map applied function arity must match number of arguments; got: "
- "arity: %d, arguments: %zu.",
+ "arity: %d, arguments: %u.",
to_apply.parameters_size(), arg_shapes.size());
}
@@ -1125,7 +1112,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
if (!ShapeUtil::IsScalar(output_shape)) {
return InvalidArgument(
"Mapped computation's result has to be a scalar; got: %s.",
- ShapeUtil::HumanString(output_shape).c_str());
+ ShapeUtil::HumanString(output_shape));
}
for (int i = 0; i < to_apply.parameters_size(); ++i) {
@@ -1135,7 +1122,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"Mapped computation's parameter has to be a scalar; "
"got parameter %d shape: %s.",
- i, ShapeUtil::HumanString(parameter_shape).c_str());
+ i, ShapeUtil::HumanString(parameter_shape));
}
if (!ShapeUtil::SameElementTypeIgnoringFpPrecision(parameter_shape,
@@ -1143,8 +1130,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"Mapped computation's parameter type has to match argument element "
"type; got parameter %d shape: %s, argument shape: %s.",
- i, ShapeUtil::HumanString(parameter_shape).c_str(),
- ShapeUtil::HumanString(*arg_shape).c_str());
+ i, ShapeUtil::HumanString(parameter_shape),
+ ShapeUtil::HumanString(*arg_shape));
}
}
@@ -1173,35 +1160,35 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"Expected feature_index of batch-norm-training to be "
"smaller than the rank of operand_shape; "
- "got feature_index %lld, and rank %lld.",
+ "got feature_index %d, and rank %d.",
feature_index, ShapeUtil::Rank(operand_shape));
}
if (feature_index < 0) {
return InvalidArgument(
"Expected feature_index of batch-norm-training to "
- "be a non-negative number, got %lld.",
+ "be a non-negative number, got %d.",
feature_index);
}
if (ShapeUtil::Rank(operand_shape) < 1) {
return InvalidArgument(
"Expected the rank of operand to "
- "batch-norm-training to be at least 1; got %lld.",
+ "batch-norm-training to be at least 1; got %d.",
ShapeUtil::Rank(operand_shape));
}
if (ShapeUtil::Rank(offset_shape) != 1) {
return InvalidArgument(
"Offset input of batch-norm-training must have"
- " rank 1, but has rank %lld.",
+ " rank 1, but has rank %d.",
ShapeUtil::Rank(offset_shape));
}
if (ShapeUtil::Rank(scale_shape) != 1) {
return InvalidArgument(
"Scale input of batch-norm-training must have"
- " rank 1, but has rank %lld.",
+ " rank 1, but has rank %d.",
ShapeUtil::Rank(scale_shape));
}
@@ -1209,7 +1196,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"The operand to batch-norm-training must have a floating point "
"element type, but the shape is %s.",
- PrimitiveType_Name(operand_shape.element_type()).c_str());
+ PrimitiveType_Name(operand_shape.element_type()));
}
if (!ShapeUtil::SameElementTypeIgnoringFpPrecision(offset_shape,
@@ -1218,8 +1205,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
"The inputs should have the same element type for batch-norm-training, "
"but the shape of offset factor is %s "
"and the shape of operand is %s.",
- PrimitiveType_Name(offset_shape.element_type()).c_str(),
- PrimitiveType_Name(operand_shape.element_type()).c_str());
+ PrimitiveType_Name(offset_shape.element_type()),
+ PrimitiveType_Name(operand_shape.element_type()));
}
if (!ShapeUtil::SameElementTypeIgnoringFpPrecision(scale_shape,
@@ -1228,8 +1215,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
"The inputs should have the same element type for batch-norm-training, "
"but the shape of scale factor is %s "
"and the shape of operand is %s.",
- PrimitiveType_Name(scale_shape.element_type()).c_str(),
- PrimitiveType_Name(operand_shape.element_type()).c_str());
+ PrimitiveType_Name(scale_shape.element_type()),
+ PrimitiveType_Name(operand_shape.element_type()));
}
const int64 feature_count = operand_shape.dimensions(feature_index);
@@ -1239,16 +1226,16 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
if (ShapeUtil::GetDimension(offset_shape, 0) != feature_count) {
return InvalidArgument(
"The size of offset factor should be the same as feature count,"
- "but the size of offset factor is %lld "
- "and the feature count is %lld.",
+ "but the size of offset factor is %d "
+ "and the feature count is %d.",
ShapeUtil::GetDimension(offset_shape, 0), feature_count);
}
if (ShapeUtil::GetDimension(scale_shape, 0) != feature_count) {
return InvalidArgument(
"The size of scale factor should be the same as feature count,"
- "but the size of scale factor is %lld "
- "and the feature count is %lld.",
+ "but the size of scale factor is %d "
+ "and the feature count is %d.",
ShapeUtil::GetDimension(scale_shape, 0), feature_count);
}
@@ -1283,35 +1270,35 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"Expected feature_index of batch-norm-inference to be "
"smaller than the rank of operand_shape; "
- "got feature_index %lld, and rank %lld.",
+ "got feature_index %d, and rank %d.",
feature_index, ShapeUtil::Rank(operand_shape));
}
if (feature_index < 0) {
return InvalidArgument(
"Expected feature_index of batch-norm-inference to "
- "be a non-negative number, got %lld.",
+ "be a non-negative number, got %d.",
feature_index);
}
if (ShapeUtil::Rank(operand_shape) < 1) {
return InvalidArgument(
"Expected the rank of operand to "
- "batch-norm-inference to be at least 1; got %lld.",
+ "batch-norm-inference to be at least 1; got %d.",
ShapeUtil::Rank(operand_shape));
}
if (ShapeUtil::Rank(offset_shape) != 1) {
return InvalidArgument(
"Offset input of batch-norm-inference must have"
- " rank 1, but has rank %lld.",
+ " rank 1, but has rank %d.",
ShapeUtil::Rank(offset_shape));
}
if (ShapeUtil::Rank(scale_shape) != 1) {
return InvalidArgument(
"Scale input of batch-norm-inference must have"
- " rank 1, but has rank %lld.",
+ " rank 1, but has rank %d.",
ShapeUtil::Rank(scale_shape));
}
@@ -1319,7 +1306,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"The operand to batch-norm-inference must have a floating point "
"element type, but the shape is %s.",
- PrimitiveType_Name(operand_shape.element_type()).c_str());
+ PrimitiveType_Name(operand_shape.element_type()));
}
if (!ShapeUtil::SameElementTypeIgnoringFpPrecision(offset_shape,
@@ -1329,8 +1316,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
"batch-norm-inference, "
"but the shape of offset factor is %s "
"and the shape of operand is %s.",
- PrimitiveType_Name(offset_shape.element_type()).c_str(),
- PrimitiveType_Name(operand_shape.element_type()).c_str());
+ PrimitiveType_Name(offset_shape.element_type()),
+ PrimitiveType_Name(operand_shape.element_type()));
}
if (!ShapeUtil::SameElementTypeIgnoringFpPrecision(scale_shape,
@@ -1340,8 +1327,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
"batch-norm-inference, "
"but the shape of scale factor is %s "
"and the shape of operand is %s.",
- PrimitiveType_Name(scale_shape.element_type()).c_str(),
- PrimitiveType_Name(operand_shape.element_type()).c_str());
+ PrimitiveType_Name(scale_shape.element_type()),
+ PrimitiveType_Name(operand_shape.element_type()));
}
if (!ShapeUtil::SameElementTypeIgnoringFpPrecision(mean_shape,
@@ -1351,8 +1338,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
"batch-norm-inference, "
"but the shape of mean is %s "
"and the shape of operand is %s.",
- PrimitiveType_Name(mean_shape.element_type()).c_str(),
- PrimitiveType_Name(operand_shape.element_type()).c_str());
+ PrimitiveType_Name(mean_shape.element_type()),
+ PrimitiveType_Name(operand_shape.element_type()));
}
if (!ShapeUtil::SameElementTypeIgnoringFpPrecision(variance_shape,
@@ -1362,8 +1349,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
"batch-norm-inference, "
"but the shape of variance is %s "
"and the shape of operand is %s.",
- PrimitiveType_Name(mean_shape.element_type()).c_str(),
- PrimitiveType_Name(variance_shape.element_type()).c_str());
+ PrimitiveType_Name(mean_shape.element_type()),
+ PrimitiveType_Name(variance_shape.element_type()));
}
const int64 feature_count = operand_shape.dimensions(feature_index);
@@ -1373,32 +1360,32 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
if (ShapeUtil::GetDimension(offset_shape, 0) != feature_count) {
return InvalidArgument(
"The size of offset factor should be the same as feature count,"
- "but the size of offset factor is %lld "
- "and the feature count is %lld.",
+ "but the size of offset factor is %d "
+ "and the feature count is %d.",
ShapeUtil::GetDimension(offset_shape, 0), feature_count);
}
if (ShapeUtil::GetDimension(scale_shape, 0) != feature_count) {
return InvalidArgument(
"The size of scale factor should be the same as feature count,"
- "but the size of scale factor is %lld "
- "and the feature count is %lld.",
+ "but the size of scale factor is %d "
+ "and the feature count is %d.",
ShapeUtil::GetDimension(scale_shape, 0), feature_count);
}
if (ShapeUtil::GetDimension(mean_shape, 0) != feature_count) {
return InvalidArgument(
"The size of mean should be the same as feature count,"
- "but the size of mean is %lld "
- "and the feature count is %lld.",
+ "but the size of mean is %d "
+ "and the feature count is %d.",
ShapeUtil::GetDimension(mean_shape, 0), feature_count);
}
if (ShapeUtil::GetDimension(variance_shape, 0) != feature_count) {
return InvalidArgument(
"The size of variance should be the same as feature count,"
- "but the size of variance is %lld "
- "and the feature count is %lld.",
+ "but the size of variance is %d "
+ "and the feature count is %d.",
ShapeUtil::GetDimension(variance_shape, 0), feature_count);
}
@@ -1428,36 +1415,36 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"Expected feature_index of batch-norm-grad to be "
"smaller than the rank of operand_shape; "
- "got feature_index %lld, and rank %lld.",
+ "got feature_index %d, and rank %d.",
feature_index, ShapeUtil::Rank(operand_shape));
}
if (ShapeUtil::Rank(operand_shape) != ShapeUtil::Rank(output_grad_shape)) {
return InvalidArgument(
"Expected operand_shape of batch-norm-grad to have the same rank as"
- " output_grad_shape; got rank(oprand_shape) %lld, and"
- " rank(output_grad_shape) %lld.",
+ " output_grad_shape; got rank(oprand_shape) %d, and"
+ " rank(output_grad_shape) %d.",
ShapeUtil::Rank(operand_shape), ShapeUtil::Rank(output_grad_shape));
}
if (ShapeUtil::Rank(mean_shape) != 1) {
return InvalidArgument(
"Mean input of batch-norm-grad must have"
- " rank 1, but has rank %lld.",
+ " rank 1, but has rank %d.",
ShapeUtil::Rank(mean_shape));
}
if (ShapeUtil::Rank(scale_shape) != 1) {
return InvalidArgument(
"Scale input of batch-norm-grad must have"
- " rank 1, but has rank %lld.",
+ " rank 1, but has rank %d.",
ShapeUtil::Rank(scale_shape));
}
if (ShapeUtil::Rank(var_shape) != 1) {
return InvalidArgument(
"Var input of batch-norm-grad must have"
- " rank 1, but has rank %lld.",
+ " rank 1, but has rank %d.",
ShapeUtil::Rank(var_shape));
}
@@ -1465,14 +1452,14 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"The operand to batch-norm-grad must have a floating point "
"element type, but the shape is %s.",
- PrimitiveType_Name(operand_shape.element_type()).c_str());
+ PrimitiveType_Name(operand_shape.element_type()));
}
if (!ShapeUtil::ElementIsFloating(output_grad_shape)) {
return InvalidArgument(
"The output_grad to batch-norm-grad must have a floating point "
"element type, but the shape is %s.",
- PrimitiveType_Name(output_grad_shape.element_type()).c_str());
+ PrimitiveType_Name(output_grad_shape.element_type()));
}
if (!ShapeUtil::SameElementTypeIgnoringFpPrecision(output_grad_shape,
@@ -1481,8 +1468,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
"The inputs should have the same element type for batch-norm-grad, "
"but the element type of output_grad is %s "
"and the element type of operand is %s.",
- PrimitiveType_Name(output_grad_shape.element_type()).c_str(),
- PrimitiveType_Name(operand_shape.element_type()).c_str());
+ PrimitiveType_Name(output_grad_shape.element_type()),
+ PrimitiveType_Name(operand_shape.element_type()));
}
if (!ShapeUtil::SameElementTypeIgnoringFpPrecision(scale_shape,
@@ -1491,8 +1478,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
"The inputs should have the same element type for batch-norm-grad, "
"but the element type of scale factor is %s "
"and the element type of operand is %s.",
- PrimitiveType_Name(scale_shape.element_type()).c_str(),
- PrimitiveType_Name(operand_shape.element_type()).c_str());
+ PrimitiveType_Name(scale_shape.element_type()),
+ PrimitiveType_Name(operand_shape.element_type()));
}
if (!ShapeUtil::SameElementTypeIgnoringFpPrecision(mean_shape,
@@ -1501,8 +1488,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
"The inputs should have the same element type for batch-norm-grad, "
"but the element type of mean is %s "
"and the element type of operand is %s.",
- PrimitiveType_Name(mean_shape.element_type()).c_str(),
- PrimitiveType_Name(operand_shape.element_type()).c_str());
+ PrimitiveType_Name(mean_shape.element_type()),
+ PrimitiveType_Name(operand_shape.element_type()));
}
if (!ShapeUtil::SameElementTypeIgnoringFpPrecision(var_shape,
@@ -1511,8 +1498,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
"The inputs should have the same element type for batch-norm-grad, "
"but the element type of mean is %s "
"and the element type of operand is %s.",
- PrimitiveType_Name(mean_shape.element_type()).c_str(),
- PrimitiveType_Name(operand_shape.element_type()).c_str());
+ PrimitiveType_Name(mean_shape.element_type()),
+ PrimitiveType_Name(operand_shape.element_type()));
}
const int64 feature_count = operand_shape.dimensions(feature_index);
@@ -1523,24 +1510,24 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
if (ShapeUtil::GetDimension(mean_shape, 0) != feature_count) {
return InvalidArgument(
"The size of mean should be the same as feature count,"
- "but the size of offset factor is %lld "
- "and the feature count is %lld.",
+ "but the size of offset factor is %d "
+ "and the feature count is %d.",
ShapeUtil::GetDimension(mean_shape, 0), feature_count);
}
if (ShapeUtil::GetDimension(scale_shape, 0) != feature_count) {
return InvalidArgument(
"The size of scale factor should be the same as feature count,"
- "but the size of scale factor is %lld "
- "and the feature count is %lld.",
+ "but the size of scale factor is %d "
+ "and the feature count is %d.",
ShapeUtil::GetDimension(scale_shape, 0), feature_count);
}
if (ShapeUtil::GetDimension(var_shape, 0) != feature_count) {
return InvalidArgument(
"The size of variance should be the same as feature count,"
- "but the size of variance is %lld "
- "and the feature count is %lld.",
+ "but the size of variance is %d "
+ "and the feature count is %d.",
ShapeUtil::GetDimension(var_shape, 0), feature_count);
}
@@ -1550,8 +1537,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
ShapeUtil::GetDimension(output_grad_shape, i)) {
return InvalidArgument(
"The bounds of operand shape should be the same as output_grad's,"
- "but the bound of operand_shape at dimension %lld is %lld "
- "and the bound of output_grad_shape is %lld.",
+ "but the bound of operand_shape at dimension %d is %d "
+ "and the bound of output_grad_shape is %d.",
i, ShapeUtil::GetDimension(operand_shape, i),
ShapeUtil::GetDimension(output_grad_shape, i));
}
@@ -1570,15 +1557,14 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
if (!ShapeUtil::SameElementTypeIgnoringFpPrecision(lhs, rhs)) {
return InvalidArgument(
"Convolution with different element types: %s and %s.",
- ShapeUtil::HumanString(lhs).c_str(),
- ShapeUtil::HumanString(rhs).c_str());
+ ShapeUtil::HumanString(lhs), ShapeUtil::HumanString(rhs));
}
if (dnums.input_spatial_dimensions_size() !=
dnums.kernel_spatial_dimensions_size()) {
return InvalidArgument(
"Both arguments to convolution must have same number of dimensions.\n"
"Window: %s",
- window.DebugString().c_str());
+ window.DebugString());
}
const int num_spatial_dims = dnums.input_spatial_dimensions_size();
@@ -1586,19 +1572,19 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"Window must have same number of dimensions as dimension numbers.\n"
"Window: %s\nDimension numbers: %s.",
- window.DebugString().c_str(), dnums.DebugString().c_str());
+ window.DebugString(), dnums.DebugString());
}
const int num_dims = num_spatial_dims + 2;
if (ShapeUtil::Rank(lhs) != num_dims) {
return InvalidArgument(
"The LHS argument to a convolution should have rank %d; lhs: %s.",
- num_dims, ShapeUtil::HumanString(lhs).c_str());
+ num_dims, ShapeUtil::HumanString(lhs));
}
if (ShapeUtil::Rank(rhs) != num_dims) {
return InvalidArgument(
"The RHS argument to a convolution should have rank %d; lhs: %s.",
- num_dims, ShapeUtil::HumanString(lhs).c_str());
+ num_dims, ShapeUtil::HumanString(lhs));
}
TF_DCHECK_OK(ShapeUtil::ValidateShapeWithOptionalLayout(lhs));
TF_DCHECK_OK(ShapeUtil::ValidateShapeWithOptionalLayout(rhs));
@@ -1635,26 +1621,26 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
!std::all_of(output_dnums.begin(), output_dnums.end(), in_range)) {
return InvalidArgument(
"A dimension number is out of range in convolution: %s.",
- dnums.DebugString().c_str());
+ dnums.DebugString());
}
if (input_dnums != expected_dnums) {
return InvalidArgument(
"Input dimensions of convolution must contain each dimension exactly "
"once: %s.",
- dnums.DebugString().c_str());
+ dnums.DebugString());
}
if (window_dnums != expected_dnums) {
return InvalidArgument(
"Window dimensions of convolution must contain each dimension exactly "
"once: %s.",
- dnums.DebugString().c_str());
+ dnums.DebugString());
}
if (output_dnums != expected_dnums) {
return InvalidArgument(
"Output dimensions of convolution must contain each dimension exactly "
"once: %s.",
- dnums.DebugString().c_str());
+ dnums.DebugString());
}
std::vector<int64> input_spatial_dims(num_spatial_dims);
@@ -1675,13 +1661,13 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
if (input_features != kernel_input_features * feature_group_count) {
return InvalidArgument(
- "Expected LHS feature dimension (value %lld) to match RHS "
- "input feature dimension * feature_group_count (value %lld); "
+ "Expected LHS feature dimension (value %d) to match RHS "
+ "input feature dimension * feature_group_count (value %d); "
"got <conv>(%s, %s)\n"
"Dimension numbers: {%s}.",
input_features, kernel_input_features * feature_group_count,
- ShapeUtil::HumanString(lhs).c_str(),
- ShapeUtil::HumanString(rhs).c_str(), dnums.DebugString().c_str());
+ ShapeUtil::HumanString(lhs), ShapeUtil::HumanString(rhs),
+ dnums.DebugString());
}
std::vector<int64> window_dims(num_spatial_dims);
for (int i = 0; i < num_spatial_dims; ++i) {
@@ -1693,8 +1679,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
"RHS shape: %s\n\t"
"Window: {%s}\n\t"
"Dimension numbers: {%s}.",
- ShapeUtil::HumanString(rhs).c_str(), window.ShortDebugString().c_str(),
- dnums.ShortDebugString().c_str());
+ ShapeUtil::HumanString(rhs), window.ShortDebugString(),
+ dnums.ShortDebugString());
}
Shape base_shape =
@@ -1720,29 +1706,29 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
const tensorflow::gtl::ArraySlice<int64> fft_length) {
const int64 fft_rank = fft_length.size();
if (fft_rank < 1 || fft_rank > 3) {
- return InvalidArgument("FFT only supports ranks 1-3; got %lld.", fft_rank);
+ return InvalidArgument("FFT only supports ranks 1-3; got %d.", fft_rank);
}
-#define RET_CHECK_RANK(x) \
- if (x.dimensions_size() < fft_rank) { \
- return InvalidArgument( \
- "FFT of rank %lld requires input of at least " \
- "same rank; got input of rank %d", \
- fft_rank, x.dimensions_size()); \
+#define RET_CHECK_RANK(x) \
+ if (x.dimensions_size() < fft_rank) { \
+ return InvalidArgument( \
+ "FFT of rank %d requires input of at least " \
+ "same rank; got input of rank %d", \
+ fft_rank, x.dimensions_size()); \
}
switch (fft_type) {
case FFT:
case IFFT:
if (in.element_type() != C64) {
return InvalidArgument("%s requires C64 input type, found %s.",
- FftType_Name(fft_type).c_str(),
- PrimitiveType_Name(in.element_type()).c_str());
+ FftType_Name(fft_type),
+ PrimitiveType_Name(in.element_type()));
}
RET_CHECK_RANK(in);
return in;
case RFFT: {
if (in.element_type() != F32) {
return InvalidArgument("RFFT requires F32 input type, found %s.",
- PrimitiveType_Name(in.element_type()).c_str());
+ PrimitiveType_Name(in.element_type()));
}
RET_CHECK_RANK(in);
for (int i = 0; i < fft_rank; i++) {
@@ -1750,7 +1736,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
fft_length[i]) {
return InvalidArgument(
"RFFT requires innermost dimensions match fft_length but "
- "dimension %lld is %lld and should be %lld.",
+ "dimension %d is %d and should be %d.",
in.dimensions_size() - fft_rank + i,
in.dimensions(in.dimensions_size() - fft_rank + i),
fft_length[i]);
@@ -1764,7 +1750,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
case IRFFT: {
if (in.element_type() != C64) {
return InvalidArgument("IRFFT requires C64 input type, found %s.",
- PrimitiveType_Name(in.element_type()).c_str());
+ PrimitiveType_Name(in.element_type()));
}
RET_CHECK_RANK(in);
Shape result = ShapeUtil::ComplexComponentShape(in);
@@ -1773,7 +1759,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
fft_length[i]) {
return InvalidArgument(
"IRFFT requires all but one innermost dimensions match "
- "fft_length, but dimension %lld is %lld and should be %lld.",
+ "fft_length, but dimension %d is %d and should be %d.",
in.dimensions_size() - fft_rank + i,
in.dimensions(in.dimensions_size() - fft_rank + i),
fft_length[i]);
@@ -1783,7 +1769,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
fft_length[fft_rank - 1] / 2 + 1) {
return InvalidArgument(
"IRFFT requires innermost dimension matches fft_length/2+1, but "
- "dimension %d is %lld and should be %lld.",
+ "dimension %d is %d and should be %d.",
in.dimensions_size() - 1, in.dimensions(in.dimensions_size() - 1),
fft_length[fft_rank - 1] / 2 + 1);
}
@@ -1819,18 +1805,18 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
TF_RET_CHECK(split_count > 0);
if (split_dimension >= ShapeUtil::Rank(shape) || split_dimension < 0) {
return InvalidArgument(
- "AllToAll split_dimension %lld is out-of-bounds in shape %s.",
- split_dimension, ShapeUtil::HumanString(shape).c_str());
+ "AllToAll split_dimension %d is out-of-bounds in shape %s.",
+ split_dimension, ShapeUtil::HumanString(shape));
}
if (concat_dimension >= ShapeUtil::Rank(shape) || concat_dimension < 0) {
return InvalidArgument(
- "AllToAll concat_dimension %lld is out-of-bounds in shape %s.",
- concat_dimension, ShapeUtil::HumanString(shape).c_str());
+ "AllToAll concat_dimension %d is out-of-bounds in shape %s.",
+ concat_dimension, ShapeUtil::HumanString(shape));
}
if (shape.dimensions(split_dimension) % split_count != 0) {
return InvalidArgument(
- "AllToAll split dimension size %lld must be dividable by split_count "
- "%lld.",
+ "AllToAll split dimension size %d must be dividable by split_count "
+ "%d.",
shape.dimensions(split_dimension), split_count);
}
std::vector<int64> new_dimensions(shape.dimensions().begin(),
@@ -1850,8 +1836,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"HLO all-to-all has operands with different shapes: the 0th "
"operand shape %s, but the %dth operand has shape %s.",
- ShapeUtil::HumanString(*operand_shapes[0]).c_str(), i,
- ShapeUtil::HumanString(*operand_shapes[i]).c_str());
+ ShapeUtil::HumanString(*operand_shapes[0]), i,
+ ShapeUtil::HumanString(*operand_shapes[i]));
}
}
@@ -1880,9 +1866,9 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
if (!ShapeUtil::SameDimensions(*reduced_args[0], *reduced_args[i])) {
return InvalidArgument(
"All reduced tensors must have the sime dimension. Tensor 0 has "
- "shape %s, Tensor %lld has shape %s",
- ShapeUtil::HumanString(*reduced_args[0]).c_str(), i,
- ShapeUtil::HumanString(*reduced_args[i]).c_str());
+ "shape %s, Tensor %d has shape %s",
+ ShapeUtil::HumanString(*reduced_args[0]), i,
+ ShapeUtil::HumanString(*reduced_args[i]));
}
}
@@ -1892,9 +1878,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
const Shape& arg = *reduced_args[0];
for (int64 dimension : dimensions_to_reduce) {
if (dimension >= ShapeUtil::Rank(arg) || dimension < 0) {
- return InvalidArgument(
- "Reducing out-of-bounds dimension %lld in shape %s.", dimension,
- ShapeUtil::HumanString(arg).c_str());
+ return InvalidArgument("Reducing out-of-bounds dimension %d in shape %s.",
+ dimension, ShapeUtil::HumanString(arg));
}
}
@@ -1967,16 +1952,16 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"Select function's first parameter shape currently must "
"match the operand element shape, but got %s vs %s.",
- ShapeUtil::HumanString(select_shape.parameters(0)).c_str(),
- ShapeUtil::HumanString(operand_element_shape).c_str());
+ ShapeUtil::HumanString(select_shape.parameters(0)),
+ ShapeUtil::HumanString(operand_element_shape));
}
if (!ShapeUtil::CompatibleIgnoringFpPrecision(operand_element_shape,
select_shape.parameters(1))) {
return InvalidArgument(
"Select function's second parameter shape currently must "
"match the operand element shape, but got %s vs %s.",
- ShapeUtil::HumanString(select_shape.parameters(1)).c_str(),
- ShapeUtil::HumanString(operand_element_shape).c_str());
+ ShapeUtil::HumanString(select_shape.parameters(1)),
+ ShapeUtil::HumanString(operand_element_shape));
}
// Check if the scatter function has a proper shape as a reduction.
@@ -1994,8 +1979,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"Source shape does not match the shape of window-reduced operand: "
"source(%s), window-reduced operand(%s).",
- ShapeUtil::HumanString(source_shape).c_str(),
- ShapeUtil::HumanString(window_result_shape).c_str());
+ ShapeUtil::HumanString(source_shape),
+ ShapeUtil::HumanString(window_result_shape));
}
return operand_shape;
}
@@ -2008,29 +1993,27 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"%s in slice operation; argument shape: %s; starts: {%s}; limits: "
"{%s}; strides: {%s}.",
- message.c_str(), ShapeUtil::HumanString(arg).c_str(),
- StrJoin(starts, ",").c_str(), StrJoin(limits, ",").c_str(),
- StrJoin(strides, ",").c_str());
+ message, ShapeUtil::HumanString(arg), StrJoin(starts, ","),
+ StrJoin(limits, ","), StrJoin(strides, ","));
};
TF_RETURN_IF_ERROR(ExpectArray(arg, "operand of slice"));
- VLOG(2) << tensorflow::strings::Printf(
- "slicing shape %s starts={%s} limits={%s}",
- ShapeUtil::HumanString(arg).c_str(), StrJoin(starts, ", ").c_str(),
- StrJoin(limits, ", ").c_str());
+ VLOG(2) << StrFormat("slicing shape %s starts={%s} limits={%s}",
+ ShapeUtil::HumanString(arg), StrJoin(starts, ", "),
+ StrJoin(limits, ", "));
if (starts.size() != limits.size()) {
- return error(Printf("slice start and limit sizes differ: %zu vs %zu",
- starts.size(), limits.size()));
+ return error(StrFormat("slice start and limit sizes differ: %u vs %u",
+ starts.size(), limits.size()));
}
if (starts.size() != strides.size()) {
- return error(Printf("slice start and strides sizes differ: %zu vs %zu",
- starts.size(), strides.size()));
+ return error(StrFormat("slice start and strides sizes differ: %u vs %u",
+ starts.size(), strides.size()));
}
if (starts.size() != ShapeUtil::Rank(arg)) {
return InvalidArgument(
- "Slice index count does not match argument rank: %zu vs %lld.",
+ "Slice index count does not match argument rank: %u vs %d.",
starts.size(), ShapeUtil::Rank(arg));
}
@@ -2040,27 +2023,24 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
int64 limit_index = limits[dimension];
int64 stride = strides[dimension];
if (start_index < 0) {
- return InvalidArgument("Negative start index to slice: %lld.",
- start_index);
+ return InvalidArgument("Negative start index to slice: %d.", start_index);
}
if (limit_index > arg.dimensions(dimension)) {
return error(
- Printf("limit index (%lld) must be less than or equal to dimension "
- "size (%lld)",
- limit_index, arg.dimensions(dimension)));
- }
- VLOG(2) << tensorflow::strings::Printf("starts[%lld] = %lld", dimension,
- start_index);
- VLOG(2) << tensorflow::strings::Printf("limits[%lld] = %lld", dimension,
- limit_index);
+ StrFormat("limit index (%d) must be less than or equal to dimension "
+ "size (%d)",
+ limit_index, arg.dimensions(dimension)));
+ }
+ VLOG(2) << StrFormat("starts[%d] = %d", dimension, start_index);
+ VLOG(2) << StrFormat("limits[%d] = %d", dimension, limit_index);
if (start_index > limit_index) {
return error(
- Printf("limit index (%lld) must be greater or equal to "
- "start index (%lld) in slice with positive stride",
- limit_index, start_index));
+ StrFormat("limit index (%d) must be greater or equal to "
+ "start index (%d) in slice with positive stride",
+ limit_index, start_index));
}
if (stride <= 0) {
- return InvalidArgument("Stride (%lld) must be positive.", stride);
+ return InvalidArgument("Stride (%d) must be positive.", stride);
}
sizes.push_back((limit_index - start_index + stride - 1) / stride);
}
@@ -2075,15 +2055,14 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
TF_RETURN_IF_ERROR(
ExpectArray(start_indices_shape, "start indices of dynamic slice"));
- VLOG(2) << tensorflow::strings::Printf(
+ VLOG(2) << StrFormat(
"slicing shape %s at dynamic start_indices %s with slice_sizes={%s}",
- ShapeUtil::HumanString(operand_shape).c_str(),
- ShapeUtil::HumanString(start_indices_shape).c_str(),
- StrJoin(slice_sizes, ", ").c_str());
+ ShapeUtil::HumanString(operand_shape),
+ ShapeUtil::HumanString(start_indices_shape), StrJoin(slice_sizes, ", "));
if (ShapeUtil::Rank(start_indices_shape) != 1) {
return InvalidArgument(
- "Dynamic slice start indices of rank %lld must be rank1.",
+ "Dynamic slice start indices of rank %d must be rank1.",
ShapeUtil::Rank(start_indices_shape));
}
@@ -2095,16 +2074,15 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
const int64 start_num_dims = start_indices_shape.dimensions(0);
if (ShapeUtil::Rank(operand_shape) != start_num_dims) {
return InvalidArgument(
- "Dynamic slice start number of dimensions %lld (%s) must match rank "
- "%lld of slice input (%s).",
- start_num_dims, ShapeUtil::HumanString(start_indices_shape).c_str(),
- ShapeUtil::Rank(operand_shape),
- ShapeUtil::HumanString(operand_shape).c_str());
+ "Dynamic slice start number of dimensions %d (%s) must match rank "
+ "%d of slice input (%s).",
+ start_num_dims, ShapeUtil::HumanString(start_indices_shape),
+ ShapeUtil::Rank(operand_shape), ShapeUtil::HumanString(operand_shape));
}
if (slice_sizes.size() != ShapeUtil::Rank(operand_shape)) {
return InvalidArgument(
- "Dynamic slice index count does not match argument rank: %zu vs %lld.",
+ "Dynamic slice index count does not match argument rank: %u vs %d.",
slice_sizes.size(), ShapeUtil::Rank(operand_shape));
}
@@ -2112,16 +2090,15 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
const int64 input_dim_size = operand_shape.dimensions(dim);
const int64 slice_dim_size = slice_sizes[dim];
if (slice_dim_size < 0) {
- return InvalidArgument("Negative size index to dynamic slice: %lld.",
+ return InvalidArgument("Negative size index to dynamic slice: %d.",
slice_dim_size);
}
if (slice_dim_size > input_dim_size) {
return InvalidArgument(
- "Slice dim size %lld greater than dynamic slice dimension: %lld.",
+ "Slice dim size %d greater than dynamic slice dimension: %d.",
slice_dim_size, input_dim_size);
}
- VLOG(2) << tensorflow::strings::Printf("slice_sizes[%lld] = %lld", dim,
- slice_dim_size);
+ VLOG(2) << StrFormat("slice_sizes[%d] = %d", dim, slice_dim_size);
}
return ShapeUtil::MakeShape(operand_shape.element_type(), slice_sizes);
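The dynamic-slice checks above reduce to three rules: start_indices is rank 1 with one entry per operand dimension, the slice-size count matches the operand rank, and each slice size fits its dimension. A hedged sketch over plain dimension vectors; names and types are illustrative, not the file's API:

#include <cstdint>
#include <vector>

// Validity of dynamic-slice arguments, per the checks above. When valid,
// the result shape is slice_sizes with the operand's element type.
bool DynamicSliceArgsValid(const std::vector<int64_t>& operand_dims,
                           int64_t start_indices_rank,
                           int64_t start_indices_length,
                           const std::vector<int64_t>& slice_sizes) {
  if (start_indices_rank != 1) return false;
  if (start_indices_length != static_cast<int64_t>(operand_dims.size()))
    return false;
  if (slice_sizes.size() != operand_dims.size()) return false;
  for (size_t dim = 0; dim < slice_sizes.size(); ++dim) {
    if (slice_sizes[dim] < 0 || slice_sizes[dim] > operand_dims[dim])
      return false;
  }
  return true;
}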
@@ -2137,16 +2114,16 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
TF_RETURN_IF_ERROR(ExpectArray(start_indices_shape,
"start indices of dynamic update slice"));
- VLOG(2) << tensorflow::strings::Printf(
+ VLOG(2) << StrFormat(
"updating slice of shape %s at dynamic start_indices %s with update "
"shape %s",
- ShapeUtil::HumanString(operand_shape).c_str(),
- ShapeUtil::HumanString(start_indices_shape).c_str(),
- ShapeUtil::HumanString(update_shape).c_str());
+ ShapeUtil::HumanString(operand_shape),
+ ShapeUtil::HumanString(start_indices_shape),
+ ShapeUtil::HumanString(update_shape));
if (ShapeUtil::Rank(start_indices_shape) != 1) {
return InvalidArgument(
- "Dynamic update slice start indices of rank %lld must be rank1.",
+ "Dynamic update slice start indices of rank %d must be rank1.",
ShapeUtil::Rank(start_indices_shape));
}
@@ -2158,17 +2135,16 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
const int64 start_num_dims = start_indices_shape.dimensions(0);
if (ShapeUtil::Rank(operand_shape) != start_num_dims) {
return InvalidArgument(
- "Dynamic update slice start number of dimensions %lld (%s) must match "
- "rank %lld of slice input (%s).",
- start_num_dims, ShapeUtil::HumanString(start_indices_shape).c_str(),
- ShapeUtil::Rank(operand_shape),
- ShapeUtil::HumanString(operand_shape).c_str());
+ "Dynamic update slice start number of dimensions %d (%s) must match "
+ "rank %d of slice input (%s).",
+ start_num_dims, ShapeUtil::HumanString(start_indices_shape),
+ ShapeUtil::Rank(operand_shape), ShapeUtil::HumanString(operand_shape));
}
if (ShapeUtil::Rank(update_shape) != ShapeUtil::Rank(operand_shape)) {
return InvalidArgument(
"Dynamic update slice update rank does not match argument rank: "
- "%lld vs %lld.",
+ "%d vs %d.",
ShapeUtil::Rank(update_shape), ShapeUtil::Rank(operand_shape));
}
@@ -2177,8 +2153,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"Dynamic update slice update element type does not match argument. "
"operand.element_type: %s vs update.element_type: %s.",
- PrimitiveType_Name(operand_shape.element_type()).c_str(),
- PrimitiveType_Name(update_shape.element_type()).c_str());
+ PrimitiveType_Name(operand_shape.element_type()),
+ PrimitiveType_Name(update_shape.element_type()));
}
for (int64 dim = 0; dim < ShapeUtil::Rank(operand_shape); ++dim) {
@@ -2186,16 +2162,15 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
const int64 update_dim_size = update_shape.dimensions(dim);
if (update_dim_size < 0) {
return InvalidArgument(
- "Size index %lld to dynamic update slice must be >= 0.",
+ "Size index %d to dynamic update slice must be >= 0.",
update_dim_size);
}
if (update_dim_size > input_dim_size) {
return InvalidArgument(
- "Update dim size %lld greater than dynamic slice dimension: %lld.",
+ "Update dim size %d greater than dynamic slice dimension: %d.",
update_dim_size, input_dim_size);
}
- VLOG(2) << tensorflow::strings::Printf("update_sizes[%lld] = %lld", dim,
- update_dim_size);
+ VLOG(2) << StrFormat("update_sizes[%d] = %d", dim, update_dim_size);
}
return operand_shape;
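Dynamic-update-slice is checked symmetrically: matching ranks and element types, and every update dimension within the operand's bound; the inferred shape is simply the operand's. A sketch under the same illustrative conventions (element types are not modeled here):

#include <cstdint>
#include <vector>

// Validity of dynamic-update-slice bounds, per the checks above.
bool UpdateSliceBoundsValid(const std::vector<int64_t>& operand_dims,
                            const std::vector<int64_t>& update_dims) {
  if (update_dims.size() != operand_dims.size()) return false;  // same rank
  for (size_t dim = 0; dim < update_dims.size(); ++dim) {
    if (update_dims[dim] < 0 || update_dims[dim] > operand_dims[dim])
      return false;
  }
  return true;  // Result shape is the operand shape, unchanged.
}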
@@ -2210,8 +2185,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
for (int64 dimension : dimensions) {
if (dimension >= ShapeUtil::Rank(operand_shape) || dimension < 0) {
return InvalidArgument(
- "One of the reverse dimensions (%lld) is out-of-bounds in shape %s.",
- dimension, ShapeUtil::HumanString(operand_shape).c_str());
+ "One of the reverse dimensions (%d) is out-of-bounds in shape %s.",
+ dimension, ShapeUtil::HumanString(operand_shape));
}
}
return operand_shape;
@@ -2222,14 +2197,14 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
if (!ShapeUtil::IsTuple(arg)) {
return InvalidArgument(
"Cannot infer shape: attempting to index into non-tuple: %s.",
- ShapeUtil::HumanString(arg).c_str());
+ ShapeUtil::HumanString(arg));
}
if (index >= arg.tuple_shapes_size()) {
return InvalidArgument(
- "Cannot infer shape: attempt to index out of tuple bounds: %lld "
+ "Cannot infer shape: attempt to index out of tuple bounds: %d "
">= %d in shape %s.",
- index, arg.tuple_shapes_size(), ShapeUtil::HumanString(arg).c_str());
+ index, arg.tuple_shapes_size(), ShapeUtil::HumanString(arg));
}
return arg.tuple_shapes(index);
@@ -2249,17 +2224,15 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
}
auto shape_string = [&]() {
- return tensorflow::strings::Printf(
- "Condition: %s; body: %s; init: %s.",
- ShapeUtil::HumanString(condition).c_str(),
- ShapeUtil::HumanString(body).c_str(),
- ShapeUtil::HumanString(init).c_str());
+ return StrFormat(
+ "Condition: %s; body: %s; init: %s.", ShapeUtil::HumanString(condition),
+ ShapeUtil::HumanString(body), ShapeUtil::HumanString(init));
};
// Check the shapes of computation parameters and return types.
if (!ShapeUtil::ShapeIs(condition.result(), PRED, {})) {
return InvalidArgument("Condition must return a boolean; got %s.",
- shape_string().c_str());
+ shape_string());
}
if (!ShapeUtil::Compatible(body.result(), condition.parameters(0)) ||
!ShapeUtil::Compatible(body.result(), body.parameters(0)) ||
@@ -2267,7 +2240,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"The parameter of condition and body, the result of the body, and init "
"must all have the same shape; got %s.",
- shape_string().c_str());
+ shape_string());
}
return init;
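The contract enforced here: condition maps the loop-carried shape T to a scalar PRED, body maps T to T, and init has shape T; the while's inferred shape is T itself. Concrete shapes that satisfy it, purely illustrative:

// init:      (s32[], f32[10])             -- the loop-carried tuple T
// condition: (s32[], f32[10]) -> pred[]   -- scalar boolean result
// body:      (s32[], f32[10]) -> (s32[], f32[10])
// Inferred while shape: T = (s32[], f32[10]).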
@@ -2279,7 +2252,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
const ProgramShape& false_computation) {
if (!ShapeUtil::ShapeIs(predicate, PRED, {})) {
return InvalidArgument("Predicate must be a boolean; got %s.",
- ShapeUtil::HumanString(predicate).c_str());
+ ShapeUtil::HumanString(predicate));
}
if (true_computation.parameters_size() != 1) {
@@ -2288,15 +2261,14 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
}
if (!ShapeUtil::Compatible(true_computation.parameters(0), true_operand)) {
auto true_shape_string = [&]() {
- return tensorflow::strings::Printf(
- "true_operand: %s; true_computation: %s",
- ShapeUtil::HumanString(true_operand).c_str(),
- ShapeUtil::HumanString(true_computation).c_str());
+ return StrFormat("true_operand: %s; true_computation: %s",
+ ShapeUtil::HumanString(true_operand),
+ ShapeUtil::HumanString(true_computation));
};
return InvalidArgument(
"true_operand must match the shape of the only parameter of "
"true_computation: got %s.",
- true_shape_string().c_str());
+ true_shape_string());
}
if (false_computation.parameters_size() != 1) {
@@ -2305,28 +2277,27 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
}
if (!ShapeUtil::Compatible(false_computation.parameters(0), false_operand)) {
auto false_shape_string = [&]() {
- return tensorflow::strings::Printf(
- "false_operand: %s; false_computation: %s",
- ShapeUtil::HumanString(false_operand).c_str(),
- ShapeUtil::HumanString(false_computation).c_str());
+ return StrFormat("false_operand: %s; false_computation: %s",
+ ShapeUtil::HumanString(false_operand),
+ ShapeUtil::HumanString(false_computation));
};
return InvalidArgument(
"false_operand must match the shape of the only parameter of "
"false_computation: got %s.",
- false_shape_string().c_str());
+ false_shape_string());
}
if (!ShapeUtil::Compatible(true_computation.result(),
false_computation.result())) {
auto shape_string = [&]() {
- return tensorflow::strings::Printf(
+ return StrFormat(
"true_computation result: %s; false_computation result: %s.",
- ShapeUtil::HumanString(true_computation.result()).c_str(),
- ShapeUtil::HumanString(false_computation.result()).c_str());
+ ShapeUtil::HumanString(true_computation.result()),
+ ShapeUtil::HumanString(false_computation.result()));
};
return InvalidArgument(
"the result of true_computation and false_computation must have the "
"same shape: got %s.",
- shape_string().c_str());
+ shape_string());
}
return true_computation.result();
}
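For conditionals the two branches may take differently shaped operands, as long as each operand matches its branch's sole parameter and both branch results are compatible. Shapes that pass every check above, purely illustrative:

// predicate:     pred[]                                 -- scalar boolean
// true_operand:  f32[16]   true_computation:  f32[16] -> f32[4]
// false_operand: f32[8]    false_computation: f32[8]  -> f32[4]
// Inferred conditional shape: f32[4], the common branch result.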
@@ -2336,7 +2307,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
TF_RETURN_IF_ERROR(ExpectArray(operand, "operand of broadcast"));
for (int64 size : broadcast_sizes) {
if (size < 0) {
- return InvalidArgument("Broadcast with negative dimension size %lld.",
+ return InvalidArgument("Broadcast with negative dimension size %d.",
size);
}
}
@@ -2361,11 +2332,11 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
if (ShapeUtil::ElementsIn(operand) != ShapeUtil::ElementsIn(inferred_shape)) {
return InvalidArgument(
- "Reshape operation has mismatched element counts: from=%lld (%s) "
- "to=%lld (%s).",
- ShapeUtil::ElementsIn(operand), ShapeUtil::HumanString(operand).c_str(),
+ "Reshape operation has mismatched element counts: from=%d (%s) "
+ "to=%d (%s).",
+ ShapeUtil::ElementsIn(operand), ShapeUtil::HumanString(operand),
ShapeUtil::ElementsIn(inferred_shape),
- ShapeUtil::HumanString(inferred_shape).c_str());
+ ShapeUtil::HumanString(inferred_shape));
}
std::vector<int64> indices(ShapeUtil::Rank(operand));
@@ -2376,8 +2347,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"Reshape dimensions [%s] are not a permutation of the operand "
"dimensions (operand shape is %s).",
- StrJoin(dimensions, ",").c_str(),
- ShapeUtil::HumanString(operand).c_str());
+ StrJoin(dimensions, ","), ShapeUtil::HumanString(operand));
}
return inferred_shape;
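The reshape precondition above is purely about element counts. A minimal sketch with an illustrative helper name:

#include <cstdint>
#include <vector>

// Number of elements in a shape given as a dimension vector.
int64_t ElementCount(const std::vector<int64_t>& dims) {
  int64_t count = 1;
  for (int64_t d : dims) count *= d;
  return count;
}

// Example: f32[3,4] -> f32[2,6] is accepted (12 == 12), while
// f32[3,4] -> f32[2,5] is rejected (12 != 10).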
@@ -2412,9 +2382,9 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
if (!ShapeUtil::SameElementTypeIgnoringFpPrecision(min, operand) ||
!ShapeUtil::SameElementTypeIgnoringFpPrecision(max, operand)) {
return InvalidArgument("Clamp with different operand types: %s, %s, %s.",
- ShapeUtil::HumanString(min).c_str(),
- ShapeUtil::HumanString(operand).c_str(),
- ShapeUtil::HumanString(max).c_str());
+ ShapeUtil::HumanString(min),
+ ShapeUtil::HumanString(operand),
+ ShapeUtil::HumanString(max));
}
if (((ShapeUtil::CompatibleIgnoringFpPrecision(min, operand) ||
ShapeUtil::IsScalar(min)) &&
@@ -2431,9 +2401,9 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return ShapeUtil::ChangeElementType(min, operand.element_type());
}
}
- return Unimplemented(
- "%s, %s <clamp> %s is not implemented.", min.ShortDebugString().c_str(),
- max.ShortDebugString().c_str(), operand.ShortDebugString().c_str());
+ return Unimplemented("%s, %s <clamp> %s is not implemented.",
+ min.ShortDebugString(), max.ShortDebugString(),
+ operand.ShortDebugString());
}
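Clamp requires all three operands to share an element type (ignoring floating-point precision), with scalar bounds broadcasting against an array operand. Operand combinations and outcomes, assuming standard XLA clamp semantics:

// clamp(f32[],    f32[2,3], f32[])    -> f32[2,3]  (scalar bounds broadcast)
// clamp(f32[2,3], f32[2,3], f32[2,3]) -> f32[2,3]
// clamp(s32[2,3], f32[2,3], f32[2,3]) -> error: mixed element types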
// TODO(b/36794510): Make broadcast semantics more consistent, by supporting
@@ -2444,13 +2414,12 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
if (!ShapeUtil::CompatibleIgnoringFpPrecision(on_true, on_false)) {
return InvalidArgument(
"Operands to select must be the same shape; got %s and %s.",
- ShapeUtil::HumanString(on_true).c_str(),
- ShapeUtil::HumanString(on_false).c_str());
+ ShapeUtil::HumanString(on_true), ShapeUtil::HumanString(on_false));
}
if (pred.element_type() != PRED) {
return InvalidArgument(
"Select's pred operand must have PRED element type; got %s.",
- ShapeUtil::HumanString(pred).c_str());
+ ShapeUtil::HumanString(pred));
}
if (ShapeUtil::CompatibleIgnoringElementType(pred, on_true) ||
ShapeUtil::IsScalar(pred)) {
@@ -2463,7 +2432,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"Select operation with non-scalar predicate with dimensionality "
" different from the other operands: %s.",
- ShapeUtil::HumanString(pred).c_str());
+ ShapeUtil::HumanString(pred));
}
}
@@ -2474,18 +2443,17 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
if (!ShapeUtil::Compatible(on_true, on_false)) {
return InvalidArgument(
"Operands to tuple-select must be the same shape; got %s and %s.",
- ShapeUtil::HumanString(on_true).c_str(),
- ShapeUtil::HumanString(on_false).c_str());
+ ShapeUtil::HumanString(on_true), ShapeUtil::HumanString(on_false));
}
if (pred.element_type() != PRED) {
return InvalidArgument(
"TupleSelect's pred operand must have PRED element type; got %s.",
- ShapeUtil::HumanString(pred).c_str());
+ ShapeUtil::HumanString(pred));
}
if (!ShapeUtil::IsScalar(pred)) {
return InvalidArgument(
"TupleSelect operation with non-scalar predicate: %s.",
- ShapeUtil::HumanString(pred).c_str());
+ ShapeUtil::HumanString(pred));
}
return on_true;
}
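Select and tuple-select differ only in their predicate rule: select accepts an elementwise or scalar PRED, while tuple-select insists on a scalar. Examples of each, purely illustrative:

// select(pred[2,3], f32[2,3], f32[2,3])                   -> f32[2,3]
// select(pred[],    f32[2,3], f32[2,3])                   -> f32[2,3]
// tuple-select(pred[], (f32[2], s32[]), (f32[2], s32[]))  -> (f32[2], s32[])
// tuple-select(pred[2], ...)  -> error: predicate must be scalar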
@@ -2502,10 +2470,10 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
});
return InvalidArgument(
"Call applied function arity must match number of arguments; got: "
- "arity: %d, arguments: %zu; computation signature: %s; argument "
+ "arity: %d, arguments: %u; computation signature: %s; argument "
"shapes: [%s].",
- to_apply.parameters_size(), arg_shapes.size(),
- computation_signature.c_str(), argument_shapes.c_str());
+ to_apply.parameters_size(), arg_shapes.size(), computation_signature,
+ argument_shapes);
}
// All arguments must be compatible with the program shape.
@@ -2516,8 +2484,8 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
return InvalidArgument(
"Call parameter must match argument; got parameter %d shape: %s, "
"argument shape: %s.",
- i, ShapeUtil::HumanString(param_shape).c_str(),
- ShapeUtil::HumanString(arg_shape).c_str());
+ i, ShapeUtil::HumanString(param_shape),
+ ShapeUtil::HumanString(arg_shape));
}
}
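The call checks amount to arity plus per-parameter compatibility. A hedged generic sketch, with the compatible functor standing in for ShapeUtil::Compatible:

#include <cstddef>
#include <vector>

// True iff `args` can be passed to a computation whose parameter shapes
// are `params`: counts match and each pair is compatible.
template <typename Shape, typename Compat>
bool CallArgsValid(const std::vector<Shape>& params,
                   const std::vector<Shape>& args, Compat compatible) {
  if (params.size() != args.size()) return false;
  for (std::size_t i = 0; i < params.size(); ++i) {
    if (!compatible(params[i], args[i])) return false;
  }
  return true;
}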
@@ -2531,14 +2499,14 @@ static Status ValidateGatherDimensionNumbers(
if (!absl::c_is_sorted(dim_numbers.offset_dims())) {
return InvalidArgument(
"Output window dimensions in gather op must be ascending; got: %s.",
- StrJoin(dim_numbers.offset_dims(), ", ").c_str());
+ StrJoin(dim_numbers.offset_dims(), ", "));
}
if (absl::c_adjacent_find(dim_numbers.offset_dims()) !=
dim_numbers.offset_dims().end()) {
return InvalidArgument(
"Output window dimensions in gather op must not repeat; got: %s.",
- StrJoin(dim_numbers.offset_dims(), ", ").c_str());
+ StrJoin(dim_numbers.offset_dims(), ", "));
}
const int64 output_offset_dim_count = dim_numbers.offset_dims_size();
@@ -2549,9 +2517,9 @@ static Status ValidateGatherDimensionNumbers(
int64 offset_dim = dim_numbers.offset_dims(i);
if (offset_dim < 0 || offset_dim >= output_shape_rank) {
return InvalidArgument(
- "Offset dimension %d in gather op is out of bounds; got %lld, but "
+ "Offset dimension %d in gather op is out of bounds; got %d, but "
"should "
- "have been in [0,%lld).",
+ "have been in [0,%d).",
i, offset_dim, output_shape_rank);
}
}
@@ -2560,8 +2528,8 @@ static Status ValidateGatherDimensionNumbers(
start_indices_shape[dim_numbers.index_vector_dim()]) {
return InvalidArgument(
"Gather op has %d elements in start_index_map and the "
- "bound of dimension index_vector_dim=%lld of start_indices is "
- "%lld. These two numbers must be equal.",
+ "bound of dimension index_vector_dim=%d of start_indices is "
+ "%d. These two numbers must be equal.",
dim_numbers.start_index_map_size(), dim_numbers.index_vector_dim(),
start_indices_shape[dim_numbers.index_vector_dim()]);
}
@@ -2571,7 +2539,7 @@ static Status ValidateGatherDimensionNumbers(
if (operand_dim_for_start_index_i < 0 ||
operand_dim_for_start_index_i >= input_shape.dimensions_size()) {
return InvalidArgument(
- "Invalid start_index_map; domain is [0, %d), got: %d->%lld.",
+ "Invalid start_index_map; domain is [0, %d), got: %d->%d.",
input_shape.dimensions_size(), i, operand_dim_for_start_index_i);
}
}
@@ -2587,14 +2555,14 @@ static Status ValidateGatherDimensionNumbers(
return InvalidArgument(
"Repeated dimensions are not allowed in start_index_map; "
"got: %s.",
- StrJoin(dim_numbers.start_index_map(), ", ").c_str());
+ StrJoin(dim_numbers.start_index_map(), ", "));
}
for (int64 collapsed_dim : dim_numbers.collapsed_slice_dims()) {
if (collapsed_dim < 0 || collapsed_dim >= input_shape.dimensions_size()) {
return InvalidArgument(
"Invalid collapsed_slice_dims set in gather op; valid range is [0, "
- "%d), got: %lld.",
+ "%d), got: %d.",
input_shape.dimensions_size(), collapsed_dim);
}
}
@@ -2602,7 +2570,7 @@ static Status ValidateGatherDimensionNumbers(
if (!absl::c_is_sorted(dim_numbers.collapsed_slice_dims())) {
return InvalidArgument(
"collapsed_slice_dims in gather op must be sorted; got: %s",
- StrJoin(dim_numbers.collapsed_slice_dims(), ", ").c_str());
+ StrJoin(dim_numbers.collapsed_slice_dims(), ", "));
}
if (absl::c_adjacent_find(dim_numbers.collapsed_slice_dims()) !=
@@ -2610,7 +2578,7 @@ static Status ValidateGatherDimensionNumbers(
return InvalidArgument(
"Repeated dimensions not allowed in collapsed_slice_dims in gather op; "
"got: %s.",
- StrJoin(dim_numbers.collapsed_slice_dims(), ", ").c_str());
+ StrJoin(dim_numbers.collapsed_slice_dims(), ", "));
}
return Status::OK();
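Taken together, the validation above admits a gather dimension-numbers assignment only under the following constraints, summarized from the checks in this function:

// ValidateGatherDimensionNumbers accepts dim_numbers only when:
//   * offset_dims is ascending and unique, each entry in [0, output_rank);
//   * start_index_map has one entry per element of start_indices'
//     index_vector_dim dimension, each unique and in [0, rank(operand));
//   * collapsed_slice_dims is sorted and unique, each in [0, rank(operand)).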
@@ -2628,7 +2596,7 @@ static Status ValidateGatherDimensionNumbers(
if (!ShapeUtil::ElementIsIntegral(start_indices_shape)) {
return InvalidArgument(
"Gather indices parameter must be an integral tensor; got %s.",
- ShapeUtil::HumanString(start_indices_shape).c_str());
+ ShapeUtil::HumanString(start_indices_shape));
}
// We implicitly reshape gather indices of shape P[A,B,C] to P[A,B,C,1] if
@@ -2641,7 +2609,7 @@ static Status ValidateGatherDimensionNumbers(
return InvalidArgument(
"Gather index leaf dimension must be within [0, rank(start_indices) + "
"1). rank(start_indices) is %d and gather index leaf dimension is "
- "%lld.",
+ "%d.",
start_indices_shape.dimensions_size(),
gather_dim_numbers.index_vector_dim());
}
@@ -2672,9 +2640,8 @@ static Status ValidateGatherDimensionNumbers(
"All components of the offset index in a gather op must either be a "
"offset dimension or explicitly collapsed; got len(slice_sizes)=%lu, "
"output_slice_sizes=%s, collapsed_slice_dims=%s.",
- slice_sizes.size(),
- StrJoin(gather_dim_numbers.offset_dims(), ",").c_str(),
- StrJoin(gather_dim_numbers.collapsed_slice_dims(), ",").c_str());
+ slice_sizes.size(), StrJoin(gather_dim_numbers.offset_dims(), ","),
+ StrJoin(gather_dim_numbers.collapsed_slice_dims(), ","));
}
for (int i = 0; i < slice_sizes.size(); i++) {
@@ -2683,7 +2650,7 @@ static Status ValidateGatherDimensionNumbers(
if (slice_size < 0 || slice_size > corresponding_input_size) {
return InvalidArgument(
"Slice size at index %d in gather op is out of range, must be "
- "within [0, %lld), got %lld.",
+ "within [0, %d), got %d.",
i, corresponding_input_size + 1, slice_size);
}
}
@@ -2692,7 +2659,7 @@ static Status ValidateGatherDimensionNumbers(
if (slice_sizes[gather_dim_numbers.collapsed_slice_dims(i)] != 1) {
return InvalidArgument(
"Gather op can only collapse slice dims with bound 1, but bound is "
- "%lld for index %lld at position %d.",
+ "%d for index %d at position %d.",
slice_sizes[gather_dim_numbers.collapsed_slice_dims(i)],
gather_dim_numbers.collapsed_slice_dims(i), i);
}
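A full gather configuration that passes every check above, including the slice-size rules; all values are illustrative:

// operand:              f32[5,7]
// start_indices:        s32[4,2], index_vector_dim = 1
// start_index_map:      {0, 1}  (two entries, matching the bound 2 of
//                                start_indices' dimension 1)
// slice_sizes:          {1, 3}  (1 <= 5 and 3 <= 7)
// collapsed_slice_dims: {0}     (only dims with slice size 1 may collapse)
// offset_dims:          {1}     (len(slice_sizes) == offsets + collapsed)
// Resulting gather output: f32[4,3].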
@@ -2737,20 +2704,20 @@ Status ValidateScatterDimensionNumbers(
if (!absl::c_is_sorted(dim_numbers.update_window_dims())) {
return InvalidArgument(
"update_window_dims in scatter op must be sorted; got: %s.",
- StrJoin(dim_numbers.update_window_dims(), ", ").c_str());
+ StrJoin(dim_numbers.update_window_dims(), ", "));
}
if (absl::c_adjacent_find(dim_numbers.update_window_dims()) !=
dim_numbers.update_window_dims().end()) {
return InvalidArgument(
"update_window_dims in scatter op must not repeat; got: %s.",
- StrJoin(dim_numbers.update_window_dims(), ", ").c_str());
+ StrJoin(dim_numbers.update_window_dims(), ", "));
}
const int64 updates_rank = ShapeUtil::Rank(updates_shape);
for (int64 window_dim : dim_numbers.update_window_dims()) {
if (window_dim < 0 || window_dim >= updates_rank) {
return InvalidArgument(
"Invalid update_window_dims set in scatter op; valid range is [0, "
- "%lld). got: %lld.",
+ "%d). got: %d.",
updates_rank, window_dim);
}
}
@@ -2759,19 +2726,19 @@ Status ValidateScatterDimensionNumbers(
if (!absl::c_is_sorted(dim_numbers.inserted_window_dims())) {
return InvalidArgument(
"inserted_window_dims in scatter op must be sorted; got: %s.",
- StrJoin(dim_numbers.inserted_window_dims(), ", ").c_str());
+ StrJoin(dim_numbers.inserted_window_dims(), ", "));
}
if (absl::c_adjacent_find(dim_numbers.inserted_window_dims()) !=
dim_numbers.inserted_window_dims().end()) {
return InvalidArgument(
"inserted_window_dims in scatter op must not repeat; got: %s.",
- StrJoin(dim_numbers.inserted_window_dims(), ", ").c_str());
+ StrJoin(dim_numbers.inserted_window_dims(), ", "));
}
for (int64 inserted_dim : dim_numbers.inserted_window_dims()) {
if (inserted_dim < 0 || inserted_dim >= operand_shape.dimensions_size()) {
return InvalidArgument(
"Invalid inserted_window_dims set in scatter op; valid range is [0, "
- "%d), got: %lld.",
+ "%d), got: %d.",
operand_shape.dimensions_size(), inserted_dim);
}
}
@@ -2781,7 +2748,7 @@ Status ValidateScatterDimensionNumbers(
scatter_indices_shape[dim_numbers.index_vector_dim()]) {
return InvalidArgument(
"Scatter op has %d elements in scatter_dims_to_operand_dims and the "
- "bound of dimension index_vector_dim=%lld of scatter_indices is %lld. "
+ "bound of dimension index_vector_dim=%d of scatter_indices is %d. "
"These two numbers must be equal.",
dim_numbers.scatter_dims_to_operand_dims_size(),
dim_numbers.index_vector_dim(),
@@ -2794,7 +2761,7 @@ Status ValidateScatterDimensionNumbers(
scatter_dim_to_operand_dim >= operand_shape.dimensions_size()) {
return InvalidArgument(
"Invalid scatter_dims_to_operand_dims mapping; domain is [0, %d), "
- "got: %d->%lld.",
+ "got: %d->%d.",
operand_shape.dimensions_size(), i, scatter_dim_to_operand_dim);
}
}
@@ -2807,7 +2774,7 @@ Status ValidateScatterDimensionNumbers(
return InvalidArgument(
"Repeated dimensions not allowed in scatter_dims_to_operand_dims; "
"got: %s.",
- StrJoin(dim_numbers.scatter_dims_to_operand_dims(), ", ").c_str());
+ StrJoin(dim_numbers.scatter_dims_to_operand_dims(), ", "));
}
return Status::OK();
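The scatter validation mirrors gather's; the checks above reduce to:

// ValidateScatterDimensionNumbers accepts dim_numbers only when:
//   * update_window_dims is sorted and unique, each in [0, rank(updates));
//   * inserted_window_dims is sorted and unique, each in [0, rank(operand));
//   * scatter_dims_to_operand_dims has one entry per element of
//     scatter_indices' index_vector_dim dimension, each unique and in
//     [0, rank(operand)).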
@@ -2828,7 +2795,7 @@ Status ValidateScatterDimensionNumbers(
if (!ShapeUtil::ElementIsIntegral(scatter_indices_shape)) {
return InvalidArgument(
"Scatter indices parameter must be an integral tensor; got %s.",
- ShapeUtil::HumanString(scatter_indices_shape).c_str());
+ ShapeUtil::HumanString(scatter_indices_shape));
}
if (scatter_indices_shape.dimensions_size() <
@@ -2837,7 +2804,7 @@ Status ValidateScatterDimensionNumbers(
return InvalidArgument(
"Scatter index leaf dimension must be within [0, rank(scatter_indices)"
" + 1). rank(scatter_indices) is %d and scatter index leaf dimension "
- "is %lld.",
+ "is %d.",
scatter_indices_shape.dimensions_size(),
scatter_dim_numbers.index_vector_dim());
}
@@ -2859,7 +2826,7 @@ Status ValidateScatterDimensionNumbers(
int64 expected_updates_rank = expanded_scatter_indices_shape.size() - 1 +
scatter_dim_numbers.update_window_dims_size();
if (ShapeUtil::Rank(updates_shape) != expected_updates_rank) {
- return InvalidArgument("Updates tensor must be of rank %lld; got %lld.",
+ return InvalidArgument("Updates tensor must be of rank %d; got %d.",
expected_updates_rank,
ShapeUtil::Rank(updates_shape));
}
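A worked instance of the rank formula above; all values are illustrative:

// scatter_indices: s32[10,2], index_vector_dim = 1
//   -> expanded_scatter_indices_shape = [10, 2], contributing
//      2 - 1 = 1 scatter (batch) dimension.
// update_window_dims = {1} -> update_window_dims_size() == 1
// expected_updates_rank = 1 + 1 = 2; updates such as f32[10,3] qualify.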
@@ -2885,7 +2852,7 @@ Status ValidateScatterDimensionNumbers(
return InvalidArgument(
"Bounds of the window dimensions of updates must not exceed the "
"bounds of the corresponding dimensions of operand. For dimension "
- "%lld, updates bound is %lld, operand bound is %lld.",
+ "%d, updates bound is %d, operand bound is %d.",
update_window_dim, updates_shape.dimensions(update_window_dim),
max_update_slice_sizes[i]);
}
@@ -2906,8 +2873,8 @@ Status ValidateScatterDimensionNumbers(
return InvalidArgument(
"Bounds of the scatter dimensions of updates must be same as the "
"bounds of the corresponding dimensions of scatter indices. For "
- "scatter dimension %lld, updates bound is %lld, scatter_indices "
- "bound is %lld.",
+ "scatter dimension %d, updates bound is %d, scatter_indices "
+ "bound is %d.",
i, updates_shape.dimensions(i),
expanded_scatter_indices_shape[scatter_dims_seen]);
}