Diffstat (limited to 'tensorflow/compiler')
125 files changed, 1405 insertions, 2069 deletions
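The change is largely mechanical: the static helper class xla::LiteralUtil is deleted (see the literal_util.h hunk below) and every call site is rewritten to use the equivalent member function on xla::Literal. A minimal sketch of the call-site rewrite, using illustrative values rather than ones taken from any particular hunk (a second sketch of the test-side pattern follows the diff):

  // Old style, removed by this commit:
  //   auto lit = xla::LiteralUtil::CreateR1<float>({1.0f, 2.0f});
  //   float x  = xla::LiteralUtil::Get<float>(*lit, {0});
  //
  // New style: the same operations as members of xla::Literal.
  #include <memory>
  #include "tensorflow/compiler/xla/literal_util.h"

  void ExampleRewrite() {
    std::unique_ptr<xla::Literal> lit =
        xla::Literal::CreateR1<float>({1.0f, 2.0f});
    float first = lit->Get<float>({0});  // element access is now a member
    (void)first;
  }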
diff --git a/tensorflow/compiler/jit/xla_device_context.cc b/tensorflow/compiler/jit/xla_device_context.cc index f329e83e14..0ab81ebd5f 100644 --- a/tensorflow/compiler/jit/xla_device_context.cc +++ b/tensorflow/compiler/jit/xla_device_context.cc @@ -137,7 +137,7 @@ void XlaTransferManager::CopyDeviceTensorToCPU(const Tensor* device_tensor, done(result.status()); return; } - const void* src_ptr = xla::LiteralUtil::InternalData(*result.ValueOrDie()); + const void* src_ptr = result.ValueOrDie()->InternalData(); void* dst_ptr = DMAHelper::base(cpu_tensor); size_t total_bytes = cpu_tensor->TotalBytes(); memcpy(dst_ptr, src_ptr, total_bytes); diff --git a/tensorflow/compiler/tf2xla/kernels/batchtospace_op.cc b/tensorflow/compiler/tf2xla/kernels/batchtospace_op.cc index eb4bd47ee5..47d2d747e6 100644 --- a/tensorflow/compiler/tf2xla/kernels/batchtospace_op.cc +++ b/tensorflow/compiler/tf2xla/kernels/batchtospace_op.cc @@ -126,8 +126,8 @@ void BatchToSpace(XlaOpKernelContext* ctx, std::vector<int64> start_indices(input_rank, 0); std::vector<int64> end_indices = reshaped_permuted_shape; for (int i = 0; i < block_rank; ++i) { - int64 crop_start = xla::LiteralUtil::Get<int64>(crops, {i, 0}); - int64 crop_end = xla::LiteralUtil::Get<int64>(crops, {i, 1}); + int64 crop_start = crops.Get<int64>({i, 0}); + int64 crop_end = crops.Get<int64>({i, 1}); OP_REQUIRES(ctx, crop_start >= 0 && crop_end >= 0, errors::InvalidArgument("Crops must be non-negative")); start_indices[1 + i] = crop_start; diff --git a/tensorflow/compiler/tf2xla/kernels/bcast_ops.cc b/tensorflow/compiler/tf2xla/kernels/bcast_ops.cc index b0fee5e4bc..bc2cd31230 100644 --- a/tensorflow/compiler/tf2xla/kernels/bcast_ops.cc +++ b/tensorflow/compiler/tf2xla/kernels/bcast_ops.cc @@ -55,7 +55,7 @@ class BCastGradArgsOp : public XlaOpKernel { BCast::Vec vec; for (int64 i = 0; i < in_shape.num_elements(); ++i) { - vec.push_back(xla::LiteralUtil::Get<int>(literal, {i})); + vec.push_back(literal.Get<int>({i})); } shapes.push_back(vec); } diff --git a/tensorflow/compiler/tf2xla/kernels/concat_op.cc b/tensorflow/compiler/tf2xla/kernels/concat_op.cc index e2eacb3839..73a4740e29 100644 --- a/tensorflow/compiler/tf2xla/kernels/concat_op.cc +++ b/tensorflow/compiler/tf2xla/kernels/concat_op.cc @@ -52,7 +52,7 @@ class ConcatBaseOp : public XlaOpKernel { xla::Literal literal; OP_REQUIRES_OK(ctx, ctx->ConstantInput(axis_index_, &literal)); // TODO(annarev): add a helper to support int64 input. - const int32 concat_dim = xla::LiteralUtil::Get<int>(literal, {}); + const int32 concat_dim = literal.Get<int>({}); std::vector<xla::ComputationDataHandle> values; std::vector<TensorShape> shapes; @@ -163,7 +163,7 @@ class ConcatOffsetOp : public XlaOpKernel { xla::Literal concat_dim_literal; OP_REQUIRES_OK(ctx, ctx->ConstantInput(0, &concat_dim_literal)); - const int64 cdim = xla::LiteralUtil::Get<int>(concat_dim_literal, {}); + const int64 cdim = concat_dim_literal.Get<int>({}); VLOG(1) << "ConcatOffset " << cdim << "," << dims; int32 axis = cdim < 0 ? 
cdim + dims : cdim; @@ -185,12 +185,10 @@ class ConcatOffsetOp : public XlaOpKernel { for (int64 j = 0; j < dims; ++j) { if (j == axis) { out_vec(j) = offset; - offset += xla::LiteralUtil::Get<int>(inp_literal, {j}); + offset += inp_literal.Get<int>({j}); } else { - const int32 inp0_element = - xla::LiteralUtil::Get<int>(inp0_literal, {j}); - const int32 inp_element = - xla::LiteralUtil::Get<int>(inp_literal, {j}); + const int32 inp0_element = inp0_literal.Get<int>({j}); + const int32 inp_element = inp_literal.Get<int>({j}); OP_REQUIRES( ctx, (inp0_element == inp_element), errors::InvalidArgument("input[", i, ",", j, "] mismatch: ", diff --git a/tensorflow/compiler/tf2xla/kernels/dynamic_stitch_op.cc b/tensorflow/compiler/tf2xla/kernels/dynamic_stitch_op.cc index 2d1a056719..faa7ef0ef9 100644 --- a/tensorflow/compiler/tf2xla/kernels/dynamic_stitch_op.cc +++ b/tensorflow/compiler/tf2xla/kernels/dynamic_stitch_op.cc @@ -103,8 +103,7 @@ class DynamicStitchOp : public XlaOpKernel { int max_index = -1; for (int input_num = 0; input_num < indices.size(); input_num++) { for (int i = 0; i < indices[input_num].shape().dimensions(0); ++i) { - max_index = std::max( - max_index, xla::LiteralUtil::Get<int>(indices[input_num], {i})); + max_index = std::max(max_index, indices[input_num].Get<int>({i})); } } int number_of_indices = max_index + 1; @@ -118,7 +117,7 @@ class DynamicStitchOp : public XlaOpKernel { int index_used_count = 0; for (int input_num = 0; input_num < indices.size(); input_num++) { for (int i = 0; i < indices[input_num].shape().dimensions(0); ++i) { - int index = xla::LiteralUtil::Get<int>(indices[input_num], {i}); + int index = indices[input_num].Get<int>({i}); src_input_vector[index] = input_num; src_slice_vector[index] = i; if (!src_index_used[index]) { diff --git a/tensorflow/compiler/tf2xla/kernels/fill_op.cc b/tensorflow/compiler/tf2xla/kernels/fill_op.cc index 1e1d2a1b4b..9e090fe01c 100644 --- a/tensorflow/compiler/tf2xla/kernels/fill_op.cc +++ b/tensorflow/compiler/tf2xla/kernels/fill_op.cc @@ -52,7 +52,7 @@ class FillOp : public XlaOpKernel { std::vector<int64> broadcast; broadcast.reserve(dims_literal.shape().dimensions(0)); for (int i = 0; i < dims_literal.shape().dimensions(0); ++i) { - broadcast.push_back(xla::LiteralUtil::Get<int>(dims_literal, {i})); + broadcast.push_back(dims_literal.Get<int>({i})); } // Look up the value input, reshaping to a scalar if it was a // 'legacy' scalar (secretly a vector). 
diff --git a/tensorflow/compiler/tf2xla/kernels/gather_op.cc b/tensorflow/compiler/tf2xla/kernels/gather_op.cc index 49eadaf9d1..3c1cdef5f8 100644 --- a/tensorflow/compiler/tf2xla/kernels/gather_op.cc +++ b/tensorflow/compiler/tf2xla/kernels/gather_op.cc @@ -66,10 +66,10 @@ class GatherOp : public XlaOpKernel { std::vector<xla::ComputationDataHandle> args; args.push_back(tc.GetOrCreateRuntimeContextParameter()); args.push_back(b.ConstantLiteral( - *xla::LiteralUtil::CreateR0<int64>(indices_shape.num_elements()))); + *xla::Literal::CreateR0<int64>(indices_shape.num_elements()))); args.push_back(b.ConstantLiteral( - *xla::LiteralUtil::CreateR0<int64>(params_shape.dim_size(0)))); - args.push_back(b.ConstantLiteral(*xla::LiteralUtil::CreateR0<int64>( + *xla::Literal::CreateR0<int64>(params_shape.dim_size(0)))); + args.push_back(b.ConstantLiteral(*xla::Literal::CreateR0<int64>( params_shape.num_elements() / params_shape.dim_size(0)))); args.push_back(ctx->Input(0)); args.push_back(ctx->Input(1)); diff --git a/tensorflow/compiler/tf2xla/kernels/index_ops.cc b/tensorflow/compiler/tf2xla/kernels/index_ops.cc index df002dddd0..6be66cf66e 100644 --- a/tensorflow/compiler/tf2xla/kernels/index_ops.cc +++ b/tensorflow/compiler/tf2xla/kernels/index_ops.cc @@ -69,7 +69,7 @@ class ArgMaxOp : public XlaOpKernel { // XLA op would have the same requirement. xla::Literal literal; OP_REQUIRES_OK(ctx, ctx->ConstantInput(1, &literal)); - const int32 dim = xla::LiteralUtil::Get<int32>(literal, {}); + const int32 dim = literal.Get<int32>({}); OP_REQUIRES(ctx, dim >= 0, errors::InvalidArgument("dim must be >= 0")); OP_REQUIRES( ctx, dim < input_shape.dims(), @@ -97,14 +97,13 @@ class ArgMaxOp : public XlaOpKernel { std::vector<xla::ComputationDataHandle> args; args.push_back(ctx->Input(0)); args.push_back(b.ConstantLiteral( - *xla::LiteralUtil::CreateR1<int64>(input_shape.dim_sizes()))); + *xla::Literal::CreateR1<int64>(input_shape.dim_sizes()))); if (input_shape.dims() > 1) { // Don't bother passing the output shape and dim for the 1d case, since // the shape is always a scalar and the dim is always 0. 
args.push_back(b.ConstantLiteral( - *xla::LiteralUtil::CreateR1<int64>(output_shape.dim_sizes()))); - args.push_back( - b.ConstantLiteral(*xla::LiteralUtil::CreateR0<int32>(dim))); + *xla::Literal::CreateR1<int64>(output_shape.dim_sizes()))); + args.push_back(b.ConstantLiteral(*xla::Literal::CreateR0<int32>(dim))); } xla::Shape xla_shape = diff --git a/tensorflow/compiler/tf2xla/kernels/pad_op.cc b/tensorflow/compiler/tf2xla/kernels/pad_op.cc index 22476f4a0c..cc13ab0203 100644 --- a/tensorflow/compiler/tf2xla/kernels/pad_op.cc +++ b/tensorflow/compiler/tf2xla/kernels/pad_op.cc @@ -60,8 +60,8 @@ class PadOp : public XlaOpKernel { xla::PaddingConfig config; for (int i = 0; i < fixed_dims; ++i) { auto* dim = config.add_dimensions(); - int before = xla::LiteralUtil::Get<int32>(pad_literal, {i, 0}); - int after = xla::LiteralUtil::Get<int32>(pad_literal, {i, 1}); + int before = pad_literal.Get<int32>({i, 0}); + int after = pad_literal.Get<int32>({i, 1}); OP_REQUIRES(ctx, before >= 0 && after >= 0, errors::InvalidArgument("Paddings must be non-negative: ", before, " ", after)); diff --git a/tensorflow/compiler/tf2xla/kernels/reduction_ops.cc b/tensorflow/compiler/tf2xla/kernels/reduction_ops.cc index 518a9372c4..dae2eb9d2a 100644 --- a/tensorflow/compiler/tf2xla/kernels/reduction_ops.cc +++ b/tensorflow/compiler/tf2xla/kernels/reduction_ops.cc @@ -63,7 +63,7 @@ class MinOp : public XlaReductionOp { xla::ComputationBuilder* builder) override { xla::PrimitiveType type; TF_CHECK_OK(DataTypeToPrimitiveType(input_type(0), &type)); - return builder->ConstantLiteral(xla::LiteralUtil::MaxValue(type)); + return builder->ConstantLiteral(xla::Literal::MaxValue(type)); } void BuildReducer(xla::ComputationBuilder* builder, @@ -83,7 +83,7 @@ class MaxOp : public XlaReductionOp { xla::ComputationBuilder* builder) override { xla::PrimitiveType type; TF_CHECK_OK(DataTypeToPrimitiveType(input_type(0), &type)); - return builder->ConstantLiteral(xla::LiteralUtil::MinValue(type)); + return builder->ConstantLiteral(xla::Literal::MinValue(type)); } void BuildReducer(xla::ComputationBuilder* builder, diff --git a/tensorflow/compiler/tf2xla/kernels/reduction_ops_common.cc b/tensorflow/compiler/tf2xla/kernels/reduction_ops_common.cc index 8798c80ad5..4b5d09eb9f 100644 --- a/tensorflow/compiler/tf2xla/kernels/reduction_ops_common.cc +++ b/tensorflow/compiler/tf2xla/kernels/reduction_ops_common.cc @@ -66,13 +66,13 @@ void XlaReductionOp::Compile(XlaOpKernelContext* ctx) { 1, {axes_tensor_shape.num_elements()}, &axes_literal)); VLOG(1) << "data shape: " << data_shape.DebugString(); - VLOG(1) << "axes : " << xla::LiteralUtil::ToString(axes_literal); + VLOG(1) << "axes : " << axes_literal.ToString(); gtl::InlinedVector<bool, 4> bitmap(data_shape.dims(), false); std::vector<int64> xla_axes; int64 num_elements_reduced = 1LL; for (int64 i = 0; i < axes_tensor_shape.num_elements(); ++i) { - int32 index = xla::LiteralUtil::Get<int>(axes_literal, {i}); + int32 index = axes_literal.Get<int>({i}); OP_REQUIRES(ctx, !(index < -data_shape.dims() || index >= data_shape.dims()), errors::InvalidArgument("Invalid reduction dimension (", index, diff --git a/tensorflow/compiler/tf2xla/kernels/reshape_op.cc b/tensorflow/compiler/tf2xla/kernels/reshape_op.cc index df542350b4..5952e75272 100644 --- a/tensorflow/compiler/tf2xla/kernels/reshape_op.cc +++ b/tensorflow/compiler/tf2xla/kernels/reshape_op.cc @@ -50,7 +50,7 @@ class ReshapeOp : public XlaOpKernel { int64 product = 1; int unknown_index = -1; for (int d = 0; d < num_dims; ++d) { - const 
int32 size = xla::LiteralUtil::Get<int>(literal, {d}); + const int32 size = literal.Get<int>({d}); if (size == -1) { OP_REQUIRES( ctx, unknown_index == -1, diff --git a/tensorflow/compiler/tf2xla/kernels/sequence_ops.cc b/tensorflow/compiler/tf2xla/kernels/sequence_ops.cc index 5b6fa64fa8..c2b0e1bb4c 100644 --- a/tensorflow/compiler/tf2xla/kernels/sequence_ops.cc +++ b/tensorflow/compiler/tf2xla/kernels/sequence_ops.cc @@ -32,7 +32,7 @@ template <typename T> Status GetValue(int index, XlaOpKernelContext* ctx, T* value) { xla::Literal literal; TF_RETURN_IF_ERROR(ctx->ConstantInput(index, &literal)); - *value = xla::LiteralUtil::Get<T>(literal, {}); + *value = literal.Get<T>({}); return Status::OK(); } @@ -41,10 +41,10 @@ Status GetIntValue(int index, XlaOpKernelContext* ctx, int64* value) { TF_RETURN_IF_ERROR(ctx->ConstantInput(index, &literal)); switch (literal.shape().element_type()) { case xla::S32: - *value = xla::LiteralUtil::Get<int32>(literal, {}); + *value = literal.Get<int32>({}); break; case xla::S64: - *value = xla::LiteralUtil::Get<int64>(literal, {}); + *value = literal.Get<int64>({}); break; default: return errors::InvalidArgument("Invalid argument type for argument", @@ -58,9 +58,9 @@ template <typename T> Status CreateRangeTensor(const xla::Literal& start_literal, const xla::Literal& limit_literal, const xla::Literal& delta_literal, Tensor* output) { - T start = xla::LiteralUtil::Get<T>(start_literal, {}); - T limit = xla::LiteralUtil::Get<T>(limit_literal, {}); - T delta = xla::LiteralUtil::Get<T>(delta_literal, {}); + T start = start_literal.Get<T>({}); + T limit = limit_literal.Get<T>({}); + T delta = delta_literal.Get<T>({}); if (delta == 0) { return errors::InvalidArgument("Requires delta != 0: ", delta); diff --git a/tensorflow/compiler/tf2xla/kernels/spacetobatch_op.cc b/tensorflow/compiler/tf2xla/kernels/spacetobatch_op.cc index f15b354cb2..83a87f19a7 100644 --- a/tensorflow/compiler/tf2xla/kernels/spacetobatch_op.cc +++ b/tensorflow/compiler/tf2xla/kernels/spacetobatch_op.cc @@ -56,8 +56,8 @@ void SpaceToBatch(XlaOpKernelContext* ctx, padding_config.add_dimensions(); // Don't pad the batch dimension. 
for (int i = 0; i < block_rank; ++i) { auto* dim = padding_config.add_dimensions(); - int64 pad_start = xla::LiteralUtil::Get<int64>(paddings, {i, 0}); - int64 pad_end = xla::LiteralUtil::Get<int64>(paddings, {i, 1}); + int64 pad_start = paddings.Get<int64>({i, 0}); + int64 pad_end = paddings.Get<int64>({i, 1}); OP_REQUIRES(ctx, pad_start >= 0 && pad_end >= 0, errors::InvalidArgument("Paddings must be non-negative")); dim->set_edge_padding_low(pad_start); diff --git a/tensorflow/compiler/tf2xla/kernels/split_op.cc b/tensorflow/compiler/tf2xla/kernels/split_op.cc index f3cec5c3ca..017f3a110e 100644 --- a/tensorflow/compiler/tf2xla/kernels/split_op.cc +++ b/tensorflow/compiler/tf2xla/kernels/split_op.cc @@ -39,7 +39,7 @@ class SplitOp : public XlaOpKernel { int32 split_dim; if (index_shape.dims() == 0) { - split_dim = xla::LiteralUtil::Get<int>(literal_index, {}); + split_dim = literal_index.Get<int>({}); } else { OP_REQUIRES( ctx, index_shape.dims() == 1, @@ -49,7 +49,7 @@ class SplitOp : public XlaOpKernel { ctx, index_shape.dim_size(0) == 1, errors::InvalidArgument("split_index input to Split Op must be a " "scalar or a vector with 1 element")); - split_dim = xla::LiteralUtil::Get<int>(literal_index, {0}); + split_dim = literal_index.Get<int>({0}); } const int32 num_split = num_outputs(); const TensorShape input_shape = ctx->InputShape(1); @@ -115,7 +115,7 @@ class SplitVOp : public XlaOpKernel { OP_REQUIRES(ctx, index_shape.dims() == 0, errors::InvalidArgument("split_dim input to Split Op must be a " "scalar")); - split_dim = xla::LiteralUtil::Get<int>(literal_index, {}); + split_dim = literal_index.Get<int>({}); xla::ComputationDataHandle input = ctx->Input(0); const TensorShape input_shape = ctx->InputShape(0); @@ -152,7 +152,7 @@ class SplitVOp : public XlaOpKernel { for (int i = 0; i < num_split; ++i) { int slice_size; - slice_size = xla::LiteralUtil::Get<int>(split_size_literal, {i}); + slice_size = split_size_literal.Get<int>({i}); if (slice_size == -1) { OP_REQUIRES( ctx, neg_one_dim == -1, diff --git a/tensorflow/compiler/tf2xla/kernels/tile_ops.cc b/tensorflow/compiler/tf2xla/kernels/tile_ops.cc index 4cc2eb8f87..9ee6bd8925 100644 --- a/tensorflow/compiler/tf2xla/kernels/tile_ops.cc +++ b/tensorflow/compiler/tf2xla/kernels/tile_ops.cc @@ -68,7 +68,7 @@ class TileOp : public XlaOpKernel { bool all_multiples_are_one = true; bool one_dimension_is_broadcasted_without_multiple = true; for (int i = 0; i < input_dims; ++i) { - int multiple = xla::LiteralUtil::Get<int>(literal, {i}); + int multiple = literal.Get<int>({i}); OP_REQUIRES(ctx, multiple, errors::InvalidArgument("Expected multiples[", i, "] >= 0, but got ", multiple)); diff --git a/tensorflow/compiler/tf2xla/literal_util.cc b/tensorflow/compiler/tf2xla/literal_util.cc index 1f2bc01cf4..e166e8a9b0 100644 --- a/tensorflow/compiler/tf2xla/literal_util.cc +++ b/tensorflow/compiler/tf2xla/literal_util.cc @@ -27,13 +27,13 @@ Status HostTensorToLiteral(const Tensor& host_tensor, xla::Literal* literal) { TF_RETURN_IF_ERROR(TensorShapeToXLAShape( host_tensor.dtype(), host_tensor.shape(), literal->mutable_shape())); - xla::LiteralUtil::Reserve(host_tensor.NumElements(), literal); + literal->Reserve(host_tensor.NumElements()); // memcpy over the payload ... // TODO(phawkins): handle string types. 
size_t total_bytes = host_tensor.TotalBytes(); if (total_bytes > 0) { - void* dst_ptr = xla::LiteralUtil::MutableInternalData(literal); + void* dst_ptr = literal->MutableInternalData(); const void* src_ptr = DMAHelper::base(&host_tensor); memcpy(dst_ptr, src_ptr, total_bytes); } @@ -55,7 +55,7 @@ Status LiteralToHostTensor(const xla::Literal& literal, DataType target_type, *host_tensor = Tensor(target_type, shape); size_t total_bytes = host_tensor->TotalBytes(); if (total_bytes > 0) { - const void* src_ptr = xla::LiteralUtil::InternalData(literal); + const void* src_ptr = literal.InternalData(); void* dst_ptr = DMAHelper::base(host_tensor); memcpy(dst_ptr, src_ptr, total_bytes); } diff --git a/tensorflow/compiler/tf2xla/literal_util_test.cc b/tensorflow/compiler/tf2xla/literal_util_test.cc index 56993bc585..f3d6787daa 100644 --- a/tensorflow/compiler/tf2xla/literal_util_test.cc +++ b/tensorflow/compiler/tf2xla/literal_util_test.cc @@ -27,7 +27,7 @@ TEST(LiteralUtil, LiteralToHostTensor) { { std::vector<int64> int64_values = {1, 2, 3}; std::unique_ptr<xla::Literal> int64_values_literal = - xla::LiteralUtil::CreateR1(gtl::ArraySlice<int64>(int64_values)); + xla::Literal::CreateR1(gtl::ArraySlice<int64>(int64_values)); Tensor host_tensor; EXPECT_EQ("Cannot convert literal of type S64 to tensor of type int32", LiteralToHostTensor(*int64_values_literal, DT_INT32, &host_tensor) @@ -48,7 +48,7 @@ TEST(LiteralUtil, LiteralToHostTensor) { Tensor host_tensor; std::vector<int32> int32_values = {10, 11}; std::unique_ptr<xla::Literal> int32_values_literal = - xla::LiteralUtil::CreateR1(gtl::ArraySlice<int32>(int32_values)); + xla::Literal::CreateR1(gtl::ArraySlice<int32>(int32_values)); EXPECT_TRUE( LiteralToHostTensor(*int32_values_literal, DT_INT32, &host_tensor) .ok()); diff --git a/tensorflow/compiler/tf2xla/xla_compiler_test.cc b/tensorflow/compiler/tf2xla/xla_compiler_test.cc index 58d74057d1..427b14534f 100644 --- a/tensorflow/compiler/tf2xla/xla_compiler_test.cc +++ b/tensorflow/compiler/tf2xla/xla_compiler_test.cc @@ -163,9 +163,9 @@ TEST_F(XlaCompilerTest, Simple) { // Tests that the generated computation works. std::unique_ptr<xla::Literal> param0_literal = - xla::LiteralUtil::CreateR1<int32>({7, 42}); + xla::Literal::CreateR1<int32>({7, 42}); std::unique_ptr<xla::Literal> param1_literal = - xla::LiteralUtil::CreateR1<int32>({-3, 101}); + xla::Literal::CreateR1<int32>({-3, 101}); std::unique_ptr<xla::GlobalData> param0_data = client_->TransferToServer(*param0_literal).ConsumeValueOrDie(); std::unique_ptr<xla::GlobalData> param1_data = @@ -179,7 +179,7 @@ TEST_F(XlaCompilerTest, Simple) { client_->Transfer(*actual).ConsumeValueOrDie(); std::unique_ptr<xla::Literal> expected_literal = - xla::LiteralUtil::CreateR1<int32>({4, 143}); + xla::Literal::CreateR1<int32>({4, 143}); xla::LiteralTestUtil::ExpectEqual(*expected_literal, *actual_literal); } @@ -225,7 +225,7 @@ TEST_F(XlaCompilerTest, ConstantOutputs) { // Tests that the generated computation works. 
std::unique_ptr<xla::Literal> param0_literal = - xla::LiteralUtil::CreateR1<int32>({7, 42}); + xla::Literal::CreateR1<int32>({7, 42}); std::unique_ptr<xla::GlobalData> param0_data = client_->TransferToServer(*param0_literal).ConsumeValueOrDie(); @@ -236,7 +236,7 @@ TEST_F(XlaCompilerTest, ConstantOutputs) { client_->Transfer(*actual).ConsumeValueOrDie(); std::unique_ptr<xla::Literal> expected_literal = - xla::LiteralUtil::CreateR1<int32>({-7, -42}); + xla::Literal::CreateR1<int32>({-7, -42}); xla::LiteralTestUtil::ExpectEqual(*expected_literal, *actual_literal); } @@ -260,7 +260,7 @@ TEST_F(XlaCompilerTest, ConstantOutputs) { // Tests that the generated computation works. std::unique_ptr<xla::Literal> param0_literal = - xla::LiteralUtil::CreateR1<int32>({7, 42}); + xla::Literal::CreateR1<int32>({7, 42}); std::unique_ptr<xla::GlobalData> param0_data = client_->TransferToServer(*param0_literal).ConsumeValueOrDie(); @@ -270,12 +270,11 @@ TEST_F(XlaCompilerTest, ConstantOutputs) { std::unique_ptr<xla::Literal> actual_literal = client_->Transfer(*actual).ConsumeValueOrDie(); - std::unique_ptr<xla::Literal> expected0 = - xla::LiteralUtil::CreateR0<int32>(7); + std::unique_ptr<xla::Literal> expected0 = xla::Literal::CreateR0<int32>(7); std::unique_ptr<xla::Literal> expected1 = - xla::LiteralUtil::CreateR1<int32>({-7, -42}); + xla::Literal::CreateR1<int32>({-7, -42}); std::unique_ptr<xla::Literal> expected = - xla::LiteralUtil::MakeTuple({expected0.get(), expected1.get()}); + xla::Literal::MakeTuple({expected0.get(), expected1.get()}); xla::LiteralTestUtil::ExpectEqual(*expected, *actual_literal); } } diff --git a/tensorflow/compiler/tf2xla/xla_helpers.cc b/tensorflow/compiler/tf2xla/xla_helpers.cc index f060f8f2f1..2366c02dd2 100644 --- a/tensorflow/compiler/tf2xla/xla_helpers.cc +++ b/tensorflow/compiler/tf2xla/xla_helpers.cc @@ -30,28 +30,28 @@ xla::ComputationDataHandle XlaHelpers::MinValue(xla::ComputationBuilder* b, DataType data_type) { xla::PrimitiveType type; TF_CHECK_OK(DataTypeToPrimitiveType(data_type, &type)); - return b->ConstantLiteral(xla::LiteralUtil::MinValue(type)); + return b->ConstantLiteral(xla::Literal::MinValue(type)); } xla::ComputationDataHandle XlaHelpers::MaxValue(xla::ComputationBuilder* b, DataType data_type) { xla::PrimitiveType type; TF_CHECK_OK(DataTypeToPrimitiveType(data_type, &type)); - return b->ConstantLiteral(xla::LiteralUtil::MaxValue(type)); + return b->ConstantLiteral(xla::Literal::MaxValue(type)); } xla::ComputationDataHandle XlaHelpers::Zero(xla::ComputationBuilder* b, DataType data_type) { xla::PrimitiveType type; TF_CHECK_OK(DataTypeToPrimitiveType(data_type, &type)); - return b->ConstantLiteral(xla::LiteralUtil::Zero(type)); + return b->ConstantLiteral(xla::Literal::Zero(type)); } xla::ComputationDataHandle XlaHelpers::One(xla::ComputationBuilder* b, DataType data_type) { xla::PrimitiveType type; TF_CHECK_OK(DataTypeToPrimitiveType(data_type, &type)); - return b->ConstantLiteral(xla::LiteralUtil::One(type)); + return b->ConstantLiteral(xla::Literal::One(type)); } xla::ComputationDataHandle XlaHelpers::IntegerLiteral( @@ -61,28 +61,28 @@ xla::ComputationDataHandle XlaHelpers::IntegerLiteral( TF_CHECK_OK(DataTypeToPrimitiveType(data_type, &type)); switch (type) { case xla::U8: - literal = *xla::LiteralUtil::CreateR0<uint8>(value); + literal = *xla::Literal::CreateR0<uint8>(value); break; case xla::U32: - literal = *xla::LiteralUtil::CreateR0<uint32>(value); + literal = *xla::Literal::CreateR0<uint32>(value); break; case xla::U64: - literal = 
*xla::LiteralUtil::CreateR0<uint64>(value); + literal = *xla::Literal::CreateR0<uint64>(value); break; case xla::S8: - literal = *xla::LiteralUtil::CreateR0<int8>(value); + literal = *xla::Literal::CreateR0<int8>(value); break; case xla::S32: - literal = *xla::LiteralUtil::CreateR0<int32>(value); + literal = *xla::Literal::CreateR0<int32>(value); break; case xla::S64: - literal = *xla::LiteralUtil::CreateR0<int64>(value); + literal = *xla::Literal::CreateR0<int64>(value); break; case xla::F32: - literal = *xla::LiteralUtil::CreateR0<float>(value); + literal = *xla::Literal::CreateR0<float>(value); break; case xla::F64: - literal = *xla::LiteralUtil::CreateR0<double>(value); + literal = *xla::Literal::CreateR0<double>(value); break; case xla::PRED: LOG(FATAL) << "pred element type is not integral"; @@ -91,7 +91,7 @@ xla::ComputationDataHandle XlaHelpers::IntegerLiteral( LOG(FATAL) << "u16/s16 literals not yet implemented"; case xla::F16: literal = - *xla::LiteralUtil::CreateR0<xla::half>(static_cast<xla::half>(value)); + *xla::Literal::CreateR0<xla::half>(static_cast<xla::half>(value)); break; case xla::TUPLE: LOG(FATAL) << "tuple element type is not integral"; diff --git a/tensorflow/compiler/tf2xla/xla_op_kernel.cc b/tensorflow/compiler/tf2xla/xla_op_kernel.cc index edb7e2a563..d606b32931 100644 --- a/tensorflow/compiler/tf2xla/xla_op_kernel.cc +++ b/tensorflow/compiler/tf2xla/xla_op_kernel.cc @@ -144,9 +144,9 @@ static Status LiteralToInt64Scalar(const xla::Literal& literal, int64* out) { return errors::InvalidArgument("value is not a scalar"); } if (literal.shape().element_type() == xla::S32) { - *out = xla::LiteralUtil::Get<int32>(literal, {}); + *out = literal.Get<int32>({}); } else if (literal.shape().element_type() == xla::S64) { - *out = xla::LiteralUtil::Get<int64>(literal, {}); + *out = literal.Get<int64>({}); } else { return errors::InvalidArgument("value must be either int32 or int64"); } @@ -168,11 +168,11 @@ static Status LiteralToInt64Vector(const xla::Literal& literal, int64 size = xla::ShapeUtil::ElementsIn(literal.shape()); if (literal.shape().element_type() == xla::S32) { for (int64 i = 0; i < size; ++i) { - out->push_back(xla::LiteralUtil::Get<int32>(literal, {i})); + out->push_back(literal.Get<int32>({i})); } } else if (literal.shape().element_type() == xla::S64) { for (int64 i = 0; i < size; ++i) { - out->push_back(xla::LiteralUtil::Get<int64>(literal, {i})); + out->push_back(literal.Get<int64>({i})); } } else { return errors::InvalidArgument("value must be either int32 or int64"); diff --git a/tensorflow/compiler/xla/client/computation_builder.h b/tensorflow/compiler/xla/client/computation_builder.h index 6a87784f6a..5dceb03281 100644 --- a/tensorflow/compiler/xla/client/computation_builder.h +++ b/tensorflow/compiler/xla/client/computation_builder.h @@ -826,87 +826,80 @@ class ComputationBuilder { template <typename NativeT> ComputationDataHandle ComputationBuilder::ConstantR0(NativeT value) { - return ConstantOp( - [value](Literal* literal) { LiteralUtil::PopulateR0(value, literal); }); + return ConstantOp([value](Literal* literal) { literal->PopulateR0(value); }); } template <typename NativeT> ComputationDataHandle ComputationBuilder::ConstantR1( tensorflow::gtl::ArraySlice<NativeT> values) { - return ConstantOp([&values](Literal* literal) { - LiteralUtil::PopulateR1(values, literal); - }); + return ConstantOp( + [&values](Literal* literal) { literal->PopulateR1(values); }); } template <typename NativeT> ComputationDataHandle ComputationBuilder::ConstantR1(int64 
length, NativeT value) { return ConstantOp([length, value](Literal* literal) { - LiteralUtil::PopulateWithValue(value, {length}, literal); + literal->PopulateWithValue(value, {length}); }); } inline ComputationDataHandle ComputationBuilder::ConstantR1( const tensorflow::core::Bitmap& values) { - return ConstantOp([&values](Literal* literal) { - LiteralUtil::PopulateR1(values, literal); - }); + return ConstantOp( + [&values](Literal* literal) { literal->PopulateR1(values); }); } template <typename NativeT> ComputationDataHandle ComputationBuilder::ConstantR2( std::initializer_list<std::initializer_list<NativeT>> values) { - return ConstantOp([&values](Literal* literal) { - LiteralUtil::PopulateR2(values, literal); - }); + return ConstantOp( + [&values](Literal* literal) { literal->PopulateR2(values); }); } template <typename NativeT> ComputationDataHandle ComputationBuilder::ConstantR2FromArray2DWithLayout( const Array2D<NativeT>& values, const Layout& layout) { return ConstantOp([&values, &layout](Literal* literal) { - LiteralUtil::PopulateR2FromArray2DWithLayout(values, layout, literal); + literal->PopulateR2FromArray2DWithLayout(values, layout); }); } template <typename NativeT> ComputationDataHandle ComputationBuilder::ConstantR2FromArray2D( const Array2D<NativeT>& values) { - return ConstantOp([&values](Literal* literal) { - LiteralUtil::PopulateR2FromArray2D(values, literal); - }); + return ConstantOp( + [&values](Literal* literal) { literal->PopulateR2FromArray2D(values); }); } template <typename NativeT> ComputationDataHandle ComputationBuilder::ConstantR3FromArray3DWithLayout( const Array3D<NativeT>& values, const Layout& layout) { return ConstantOp([&values, &layout](Literal* literal) { - LiteralUtil::PopulateR3FromArray3DWithLayout(values, layout, literal); + literal->PopulateR3FromArray3DWithLayout(values, layout); }); } template <typename NativeT> ComputationDataHandle ComputationBuilder::ConstantR3FromArray3D( const Array3D<NativeT>& values) { - return ConstantOp([&values](Literal* literal) { - LiteralUtil::PopulateR3FromArray3D(values, literal); - }); + return ConstantOp( + [&values](Literal* literal) { literal->PopulateR3FromArray3D(values); }); } template <typename NativeT> ComputationDataHandle ComputationBuilder::ConstantR4FromArray4DWithLayout( const Array4D<NativeT>& values, const Layout& layout) { return ConstantOp([&values, &layout](Literal* literal) { - LiteralUtil::PopulateR4FromArray4DWithLayout(values, layout, literal); + literal->PopulateR4FromArray4DWithLayout(values, layout); }); } template <typename NativeT> ComputationDataHandle ComputationBuilder::ConstantR4FromArray4D( const Array4D<NativeT>& values) { - return ConstantOp([&values](Literal* literal) { - LiteralUtil::PopulateR4FromArray4D(values, literal); - }); + return ConstantOp( + [&values](Literal* literal) { literal->PopulateR4FromArray4D(values); }); } } // namespace xla diff --git a/tensorflow/compiler/xla/client/lib/testing.cc b/tensorflow/compiler/xla/client/lib/testing.cc index daa1557df0..ffdc7dd943 100644 --- a/tensorflow/compiler/xla/client/lib/testing.cc +++ b/tensorflow/compiler/xla/client/lib/testing.cc @@ -34,7 +34,7 @@ std::unique_ptr<GlobalData> MakeFakeDataOrDie(const Shape& shape, client, tensorflow::strings::StrCat("make_fake_", ShapeUtil::HumanString(shape))); // TODO(b/26811613): Replace this when RNG is supported on all backends. 
- b.Broadcast(b.ConstantLiteral(LiteralUtil::One(shape.element_type())), + b.Broadcast(b.ConstantLiteral(Literal::One(shape.element_type())), AsInt64Slice(shape.dimensions())); Computation computation = b.Build().ConsumeValueOrDie(); diff --git a/tensorflow/compiler/xla/literal_util.h b/tensorflow/compiler/xla/literal_util.h index 64e58e32fb..caee6cc0a0 100644 --- a/tensorflow/compiler/xla/literal_util.h +++ b/tensorflow/compiler/xla/literal_util.h @@ -646,539 +646,6 @@ class Literal { std::vector<Literal> tuple_literals_; }; -// Utility class for dealing with XLA literal values. Most methods are -// templated by native (host) type which corresponds to a unique XLA -// PrimitiveType. See ComputationBuilder for details. Not all primitive types -// defined in xla_data.proto have a corresponding native type or even have a -// storage location in the Literal proto yet (for example, primitive type F16). -// -// TODO(dnovillo) - All functions in this class simply redirect to the -// corresponding function in class Literal. Remove this class after converting -// all user code to use Literal directly. -class LiteralUtil { - public: - // Creates new literal of a given rank. To minimize ambiguity (for users and - // the compiler) these CreateR[0-2] methods should explicitly specify the - // native type. For example: - // - // CreateR1<float>({1.0, 42.0}); - // CreateR2<uint32>({{1, 2}, {3, 4}}); - // - // The variants not ending with WithLayout use the default XLA layout for the - // literal's linear representation in memory. - template <typename NativeT> - static std::unique_ptr<Literal> CreateR0(NativeT value) { - return Literal::CreateR0(value); - } - - template <typename NativeT> - static std::unique_ptr<Literal> CreateR1( - tensorflow::gtl::ArraySlice<NativeT> values) { - return Literal::CreateR1(values); - } - - static std::unique_ptr<Literal> CreateR1( - const tensorflow::core::Bitmap& values) { - return Literal::CreateR1(values); - } - - template <typename NativeT> - static std::unique_ptr<Literal> CreateR2( - std::initializer_list<std::initializer_list<NativeT>> values) { - return Literal::CreateR2(values); - } - - template <typename NativeT> - static std::unique_ptr<Literal> CreateR2WithLayout( - std::initializer_list<std::initializer_list<NativeT>> values, - const Layout& layout) { - return Literal::CreateR2WithLayout(values, layout); - } - - template <typename NativeT> - static std::unique_ptr<Literal> CreateR3( - std::initializer_list< - std::initializer_list<std::initializer_list<NativeT>>> - values) { - return Literal::CreateR3(values); - } - - template <typename NativeT> - static std::unique_ptr<Literal> CreateR3WithLayout( - std::initializer_list< - std::initializer_list<std::initializer_list<NativeT>>> - values, - const Layout& layout) { - return Literal::CreateR3WithLayout(values, layout); - } - - template <typename NativeT> - static std::unique_ptr<Literal> CreateR4( - std::initializer_list<std::initializer_list< - std::initializer_list<std::initializer_list<NativeT>>>> - values) { - return Literal::CreateR4(values); - } - - template <typename NativeT> - static std::unique_ptr<Literal> CreateR4WithLayout( - std::initializer_list<std::initializer_list< - std::initializer_list<std::initializer_list<NativeT>>>> - values, - const Layout& layout) { - return Literal::CreateR4WithLayout(values, layout); - } - - // Creates a new Literal object with the shape specified as parameter. 
- // The content of the literal values is the default value of the primitive - // type of literal itself (0 for numeric types, and false for predicates). - static std::unique_ptr<Literal> CreateFromShape(const Shape& shape) { - return Literal::CreateFromShape(shape); - } - - // Creates a new Literal object with its values havings the primitive_type - // type, and with dimensions defined by the dimensions parameter. - // The content of the literal values is the default value of the primitive - // type of literal itself (0 for numeric types, and false for predicates). - static std::unique_ptr<Literal> CreateFromDimensions( - PrimitiveType primitive_type, - tensorflow::gtl::ArraySlice<int64> dimensions) { - return Literal::CreateFromDimensions(primitive_type, dimensions); - } - - // Copies the values from src_literal, starting at src_base shape indexes, - // to dest_literal, starting at dest_base, where the copy size in each - // dimension is specified by copy_size. - // - // The src_literal and dest_literal must have the same primitive type, - // src_base+copy_size must fit the source literal dimensions, as well as - // dest_base+copy_size must fit the destination literal dimensions. - static Status Copy(const Literal& src_literal, - tensorflow::gtl::ArraySlice<int64> src_base, - Literal* dest_literal, - tensorflow::gtl::ArraySlice<int64> dest_base, - tensorflow::gtl::ArraySlice<int64> copy_size) { - return dest_literal->Copy(src_literal, src_base, dest_base, copy_size); - } - - // Creates a new value that has the equivalent value as literal, but conforms - // to new_layout; e.g. a literal matrix that was in {0, 1} minor-to-major - // dimension layout can be re-laid-out as {1, 0} minor-to-major dimension - // layout and the value in the cell at any given logical index (i0, i1) will - // be the same. - // - // Note: this is useful when the client wants to ensure that a value placed in - // the XLA allocation tracker has a particular layout; for efficiency - // purposes or avoiding unimplemented operation/layout combinations. - static std::unique_ptr<Literal> Relayout(const Literal& literal, - const Layout& new_layout) { - return literal.Relayout(new_layout); - } - - // Reshapes literal 'input' to have 'shape'. Both the original shape and - // 'shape' must contain the same number of elements. The implementation - // currently only supports monotonic dim0-major layouts. - static StatusOr<std::unique_ptr<Literal>> Reshape( - const xla::Literal& input, tensorflow::gtl::ArraySlice<int64> shape) { - return input.Reshape(shape); - } - - // Creates a new literal by reordering the dimensions of the original literal. - // The given `permutation` must be a permutation of the dimension numbers - // in the original literal, and it specifies the order of the new dimensions - // in the result literal (i.e., new_order[i] = old_order[permutation[i]]). - // For example, a transpose call on a literal of shape [3 x 8 x 4] and - // `permutation` = {2, 0, 1} returns a new literal of shape [4 x 3 x 8]. - static std::unique_ptr<Literal> Transpose( - const Literal& literal, tensorflow::gtl::ArraySlice<int64> permutation) { - return literal.Transpose(permutation); - } - - // Creates a sub-array from the given literal by extracting the indices - // [start_index, limit_index) of each dimension. The result literal has the - // same rank and layout as for the given literal. 
The number of indices in - // start_indices and limit_indices must be the rank of the literal, and the - // indices follow the order of the dimensions. - static std::unique_ptr<Literal> Slice( - const Literal& literal, tensorflow::gtl::ArraySlice<int64> start_indices, - tensorflow::gtl::ArraySlice<int64> limit_indices) { - return literal.Slice(start_indices, limit_indices); - } - - // Creates a literal with a prepended dimension with bound "times"; e.g. a - // f32[3x2] with times=4 will produce a f32[4x3x2] with the 3x2 from the input - // literal replicated four times. - template <typename NativeT> - static std::unique_ptr<Literal> Replicate(const Literal& input, int64 times) { - return input.Replicate<NativeT>(times); - } - - // Creates a literal by converting each element in an original literal to a - // new type. - template <typename NativeSrcT, typename NativeDestT> - static std::unique_ptr<Literal> Convert(const Literal& literal) { - return literal.Convert<NativeSrcT, NativeDestT>(); - } - - // Creates a literal value zero of the given primitive type. - static Literal Zero(PrimitiveType primitive_type) { - return Literal::Zero(primitive_type); - } - - // Creates a literal value one of the given primitive type. - static Literal One(PrimitiveType primitive_type) { - return Literal::One(primitive_type); - } - - // Creates a literal value containing the minimum value of the given - // primitive type. For floating-point types, returns -inf. - static Literal MinValue(PrimitiveType primitive_type) { - return Literal::MinValue(primitive_type); - } - - // Creates a literal value containing the maximum value of the given - // primitive type. For floating-point types, returns inf. - static Literal MaxValue(PrimitiveType primitive_type) { - return Literal::MaxValue(primitive_type); - } - - // Creates a literal of the given shape where each element is `value`. - template <typename NativeT> - static std::unique_ptr<Literal> CreateFullWithMonotonicDim0MajorLayout( - tensorflow::gtl::ArraySlice<int64> dimensions, NativeT value) { - return Literal::CreateFullWithMonotonicDim0MajorLayout(dimensions, value); - } - - // Creates a new literal from an array. The variants not ending with - // WithLayout use the default XLA layout for the literal's linear - // representation in memory. 
- template <typename NativeT> - static std::unique_ptr<Literal> CreateR2FromArray2D( - const Array2D<NativeT>& values) { - return Literal::CreateR2FromArray2D(values); - } - - template <typename NativeT> - static std::unique_ptr<Literal> CreateR2FromArray2DWithLayout( - const Array2D<NativeT>& values, const Layout& layout) { - return Literal::CreateR2FromArray2DWithLayout(values, layout); - } - - template <typename NativeT> - static std::unique_ptr<Literal> CreateR3FromArray3D( - const Array3D<NativeT>& values) { - return Literal::CreateR3FromArray3D(values); - } - - template <typename NativeT> - static std::unique_ptr<Literal> CreateR3FromArray3DWithLayout( - const Array3D<NativeT>& values, const Layout& layout) { - return Literal::CreateR3FromArray3DWithLayout(values, layout); - } - - template <typename NativeT> - static std::unique_ptr<Literal> CreateR4FromArray4D( - const Array4D<NativeT>& values) { - return Literal::CreateR4FromArray4D(values); - } - - template <typename NativeT> - static std::unique_ptr<Literal> CreateR4FromArray4DWithLayout( - const Array4D<NativeT>& values, const Layout& layout) { - return Literal::CreateR4FromArray4DWithLayout(values, layout); - } - - // Creates a new vector of U8s literal value from a string. - static std::unique_ptr<Literal> CreateR1U8(tensorflow::StringPiece value) { - return Literal::CreateR1U8(value); - } - - // Creates a linspace-populated literal with the given number of rows and - // columns. - static std::unique_ptr<Literal> CreateR2F32Linspace(float from, float to, - int64 rows, int64 cols) { - return Literal::CreateR2F32Linspace(from, to, rows, cols); - } - - // Creates a literal that projects the (x, y) dimensions given in values into - // the z dimension given by "projection". - template <typename NativeT> - static std::unique_ptr<Literal> CreateR3Projected( - std::initializer_list<std::initializer_list<NativeT>> values, - int64 projection) { - return Literal::CreateR3Projected(values, projection); - } - - // Creates a literal that projects the (x, y) dimensions given in values into - // the z and p dimensions given. - template <typename NativeT> - static std::unique_ptr<Literal> CreateR4Projected( - std::initializer_list<std::initializer_list<NativeT>> values, - int64 projection_p, int64 projection_z) { - return Literal::CreateR4Projected(values, projection_p, projection_z); - } - - // Clones literal into an owned unique_ptr version. - static std::unique_ptr<Literal> CloneToUnique(const Literal& literal) { - return literal.CloneToUnique(); - } - - // Returns the linear index of the given index within the literal's - // element_type repeated field. - static int64 LinearIndex(const Literal& literal, - tensorflow::gtl::ArraySlice<int64> multi_index) { - return literal.LinearIndex(multi_index); - } - - // Gets or sets an element in the literal at the given index. The index is - // CHECKed against the dimension sizes. - template <typename NativeT> - static NativeT Get(const Literal& literal, - tensorflow::gtl::ArraySlice<int64> multi_index) { - return literal.Get<NativeT>(multi_index); - } - - template <typename NativeT> - static void Set(Literal* literal, - tensorflow::gtl::ArraySlice<int64> multi_index, - NativeT value) { - literal->Set(multi_index, value); - } - - // Retrieves the mutable array slice interface which can be used to manipulate - // pre-allocated literal values. 
- template <typename NativeT> - static tensorflow::gtl::MutableArraySlice<NativeT> GetMutableArraySlice( - Literal* literal) { - return literal->GetMutableArraySlice<NativeT>(); - } - - // Returns the element value at index (0, ..., 0), however many zeroes are - // required for that index. - template <typename NativeT> - static NativeT GetFirstElement(const Literal& literal) { - return literal.GetFirstElement<NativeT>(); - } - - // As Get(), but determines the correct type and converts the value - // into text. - static string GetAsString(const Literal& literal, - tensorflow::gtl::ArraySlice<int64> multi_index) { - return literal.GetAsString(multi_index); - } - - // Returns an identity matrix (rank 2) with the given row and column count. - template <typename NativeT> - static std::unique_ptr<Literal> MakeIdentityR2(int64 size) { - return Literal::MakeIdentityR2<NativeT>(size); - } - - // Returns a tuple literal composed of given literals. - static std::unique_ptr<Literal> MakeTuple( - tensorflow::gtl::ArraySlice<const Literal*> elements) { - return Literal::MakeTuple(elements); - } - - // Validates that the data payload of the literal matches the literal shape; - // if it does not, an appropriate status is returned. - static tensorflow::Status ValidateLiteral(const Literal& literal) { - return literal.ValidateLiteral(); - } - - // Returns a string representation of the literal value. - static string ToString(const Literal& literal) { return literal.ToString(); } - - // Invokes the "per cell" callback for each element in the provided - // literal with the element's indices and a string representation of - // the element's value. - // - // This function is useful if you want a polymorphic representation - // of the tensor's elements (turning it to a string for something - // like representation in a protobuf). - static void EachCellAsString( - const Literal& literal, - const std::function<void(tensorflow::gtl::ArraySlice<int64> indices, - const string& value)>& per_cell) { - literal.EachCellAsString(per_cell); - } - - template <typename NativeT> - static void EachCell( - const Literal& literal, - std::function<void(tensorflow::gtl::ArraySlice<int64> indices, - NativeT value)> - per_cell) { - literal.EachCell<NativeT>(per_cell); - } - - // Templated methods which populate the given repeated field in the Literal - // proto with the given value(s). The Shape field of the Literal proto is set - // to match the array dimensions and type. Examples: - // - // // Populate with floats. - // Array2D<float> float_values = ... - // PopulateR2FromArray2D(values, literal); - // - // // Populate with int32s. 
- // PopulateR2({{1, 2}, {3, 4}}, literal); - // - template <typename NativeT> - static void PopulateR0(NativeT values, Literal* literal) { - literal->PopulateR0(values); - } - - template <typename NativeT> - static void PopulateR1(tensorflow::gtl::ArraySlice<NativeT> values, - Literal* literal) { - literal->PopulateR1(values); - } - - static void PopulateR1(const tensorflow::core::Bitmap& values, - Literal* literal) { - literal->PopulateR1(values); - } - - template <typename NativeT> - static void PopulateR2( - std::initializer_list<std::initializer_list<NativeT>> values, - Literal* literal) { - literal->PopulateR2(values); - } - - template <typename NativeT> - static void PopulateR2WithLayout( - std::initializer_list<std::initializer_list<NativeT>> values, - const Layout& layout, Literal* literal) { - literal->PopulateR2WithLayout(values, layout); - } - - template <typename NativeT> - static void PopulateR2FromArray2D(const Array2D<NativeT>& values, - Literal* literal) { - literal->PopulateR2FromArray2D(values); - } - - template <typename NativeT> - static void PopulateR2FromArray2DWithLayout(const Array2D<NativeT>& values, - const Layout& layout, - Literal* literal) { - literal->PopulateR2FromArray2DWithLayout(values, layout); - } - - template <typename NativeT> - static void PopulateR3FromArray3D(const Array3D<NativeT>& values, - Literal* literal) { - literal->PopulateR3FromArray3D(values); - } - - template <typename NativeT> - static void PopulateR3FromArray3DWithLayout(const Array3D<NativeT>& values, - const Layout& layout, - Literal* literal) { - literal->PopulateR3FromArray3DWithLayout(values, layout); - } - - template <typename NativeT> - static void PopulateR4FromArray4D(const Array4D<NativeT>& values, - Literal* literal) { - literal->PopulateR4FromArray4D(values); - } - - template <typename NativeT> - static void PopulateR4FromArray4DWithLayout(const Array4D<NativeT>& values, - const Layout& layout, - Literal* literal) { - literal->PopulateR4FromArray4DWithLayout(values, layout); - } - - // Populates literal values by calling the generator function for every cell - // in the literal object. - template <typename NativeT> - static Status Populate( - Literal* literal, - const std::function<NativeT(tensorflow::gtl::ArraySlice<int64> indexes)>& - generator) { - return literal->Populate(generator); - } - - // Creates a Literal of the given dimensions with all elements set to the - // given value. - template <typename NativeT> - static void PopulateWithValue(NativeT value, - tensorflow::gtl::ArraySlice<int64> dimensions, - Literal* literal) { - return literal->PopulateWithValue(value, dimensions); - } - - // Returns a pointer to the underlying vector containing the array data. Use - // with care. - static const void* InternalData(const Literal& literal) { - return literal.InternalData(); - } - - static void* MutableInternalData(Literal* literal) { - return literal->MutableInternalData(); - } - - // Allocates space in the underlying vector of the literal sufficient to hold - // num_elements of the literal's primitive type. Values in the vector are set - // to zero. num_elements must equal the number of elements in the literals - // shape. - static void Reserve(int64 num_elements, Literal* literal) { - literal->Reserve(num_elements); - } - - // Allocates space in the underlying vector of the literal sufficient to hold - // num_elements of the literal's primitive type and sets each element in the - // literal to the given value. 
num_elements must equal the number of elements - // in the literals shape. - template <typename NativeT> - static void Resize(int64 num_elements, NativeT value, Literal* literal) { - literal->Resize(num_elements, value); - } - - // Returns true if the two given literals have the same shape and - // values. Layout is not considered in the comparison. - static bool Equal(const Literal& literal1, const Literal& literal2) { - return literal1.Equal(literal2); - } - - // Returns whether every element in the given literal is equal to value. - // - // value is an int8 because we expect this to be called with small - // compile-time constants (0, -1, etc.) and so that whatever value you pass - // can be represented exactly by floating-point types as small as 16 bits. - // - // If value doesn't fit in literal's type, returns false. Values of 1/0 are - // considered equal to true/false; other values are not considered equal to - // true. - static bool IsAll(const Literal& literal, int8 value) { - return literal.IsAll(value); - } - - // Like IsAll(const Literal&, int8), except we check whether the literal is - // equal to a particular floating-point number. - // - // If the literal is not a floating-point value, this always returns false. - // - // This casts value to the type of literal, then compares using ==. The usual - // admonishments about floating-point equality checks apply. We expect you to - // use this to check for values that can be expressed precisely as a float, - // e.g. -0.5. - static bool IsAllFloat(const Literal& literal, float value) { - return literal.IsAllFloat(value); - } - - // Returns whether the literal is zero at the specified index. The literal - // must be an array. - static bool IsZero(const Literal& literal, - tensorflow::gtl::ArraySlice<int64> indices) { - return literal.IsZero(indices); - } - - TF_DISALLOW_COPY_AND_ASSIGN(LiteralUtil); -}; - // Declarations of template specializations for GetArraySlice and // GetMutableArraySlice. The specializations map native type to XLA primitive // type. 
diff --git a/tensorflow/compiler/xla/literal_util_test.cc b/tensorflow/compiler/xla/literal_util_test.cc index 50ea286b53..d52e6eb5e2 100644 --- a/tensorflow/compiler/xla/literal_util_test.cc +++ b/tensorflow/compiler/xla/literal_util_test.cc @@ -72,11 +72,11 @@ class LiteralUtilTest : public ::testing::Test { layout_r4_dim0minor_ = LayoutUtil::MakeLayout({0, 1, 2, 3}); literal_r4_2x2x3x3_dim0major_ = - LiteralUtil::CreateR4FromArray4DWithLayout<float>(arr4d, - layout_r4_dim0major_); + Literal::CreateR4FromArray4DWithLayout<float>(arr4d, + layout_r4_dim0major_); literal_r4_2x2x3x3_dim0minor_ = - LiteralUtil::CreateR4FromArray4DWithLayout<float>(arr4d, - layout_r4_dim0minor_); + Literal::CreateR4FromArray4DWithLayout<float>(arr4d, + layout_r4_dim0minor_); } Layout layout_r2_dim0major_; @@ -90,43 +90,42 @@ class LiteralUtilTest : public ::testing::Test { }; TEST_F(LiteralUtilTest, LiteralScalarToString) { - auto true_lit = LiteralUtil::CreateR0<bool>(true); - ASSERT_EQ("true", LiteralUtil::ToString(*true_lit)); + auto true_lit = Literal::CreateR0<bool>(true); + ASSERT_EQ("true", true_lit->ToString()); - auto false_lit = LiteralUtil::CreateR0<bool>(false); - ASSERT_EQ("false", LiteralUtil::ToString(*false_lit)); + auto false_lit = Literal::CreateR0<bool>(false); + ASSERT_EQ("false", false_lit->ToString()); - auto u32_lit = LiteralUtil::CreateR0<uint32>(42); - ASSERT_EQ("42", LiteralUtil::ToString(*u32_lit)); + auto u32_lit = Literal::CreateR0<uint32>(42); + ASSERT_EQ("42", u32_lit->ToString()); - auto s32_lit = LiteralUtil::CreateR0<int32>(-999); - ASSERT_EQ("-999", LiteralUtil::ToString(*s32_lit)); + auto s32_lit = Literal::CreateR0<int32>(-999); + ASSERT_EQ("-999", s32_lit->ToString()); - auto f32_lit = LiteralUtil::CreateR0<float>(3.14f); - ASSERT_EQ("3.14", LiteralUtil::ToString(*f32_lit)); + auto f32_lit = Literal::CreateR0<float>(3.14f); + ASSERT_EQ("3.14", f32_lit->ToString()); - auto f16_lit = LiteralUtil::CreateR0<half>(static_cast<half>(0.5f)); - ASSERT_EQ("0.5", LiteralUtil::ToString(*f16_lit)); + auto f16_lit = Literal::CreateR0<half>(static_cast<half>(0.5f)); + ASSERT_EQ("0.5", f16_lit->ToString()); } TEST_F(LiteralUtilTest, LiteralVectorToString) { - auto pred_vec = LiteralUtil::CreateR1<bool>({true, false, true}); - ASSERT_EQ("{101}", LiteralUtil::ToString(*pred_vec)); + auto pred_vec = Literal::CreateR1<bool>({true, false, true}); + ASSERT_EQ("{101}", pred_vec->ToString()); } TEST_F(LiteralUtilTest, R2ToString) { - const auto literal = LiteralUtil::CreateR2({{1, 2}, {3, 4}, {5, 6}}); + const auto literal = Literal::CreateR2({{1, 2}, {3, 4}, {5, 6}}); const string expected = R"(s32[3,2] { { 1, 2 }, { 3, 4 }, { 5, 6 }, })"; - ASSERT_EQ(expected, LiteralUtil::ToString(*literal)); + ASSERT_EQ(expected, literal->ToString()); } TEST_F(LiteralUtilTest, R3ToString) { - const auto literal = - LiteralUtil::CreateR3({{{1}, {2}}, {{3}, {4}}, {{5}, {6}}}); + const auto literal = Literal::CreateR3({{{1}, {2}}, {{3}, {4}}, {{5}, {6}}}); const string expected = R"(s32[3,2,1] { { { 1 }, { 2 } }, @@ -135,13 +134,13 @@ TEST_F(LiteralUtilTest, R3ToString) { { { 5 }, { 6 } } })"; - ASSERT_EQ(expected, LiteralUtil::ToString(*literal)); + ASSERT_EQ(expected, literal->ToString()); } TEST_F(LiteralUtilTest, TupleToString) { - auto scalar = LiteralUtil::CreateR0<float>(1.0); - auto matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}); - auto tuple = LiteralUtil::MakeTuple({scalar.get(), matrix.get()}); + auto scalar = Literal::CreateR0<float>(1.0); + auto matrix = 
Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}); + auto tuple = Literal::MakeTuple({scalar.get(), matrix.get()}); const string expected = R"((f32[], f32[2,2]) ( 1, f32[2,2] { @@ -149,7 +148,7 @@ f32[2,2] { { 3, 4 }, }, ))"; - ASSERT_EQ(expected, LiteralUtil::ToString(*tuple)); + ASSERT_EQ(expected, tuple->ToString()); } TEST_F(LiteralUtilTest, CreateR3FromArray3d) { @@ -164,9 +163,9 @@ TEST_F(LiteralUtilTest, CreateR3FromArray3d) { }); // clang-format on - auto literal = LiteralUtil::CreateR3FromArray3D(array_3d); + auto literal = Literal::CreateR3FromArray3D(array_3d); EXPECT_THAT(literal->shape().dimensions(), ElementsAre(2, 3, 2)); - string result = LiteralUtil::ToString(*literal); + string result = literal->ToString(); const string expected = R"(f32[2,3,2] { { { 1, 2 }, { 3, 4 }, @@ -180,14 +179,14 @@ TEST_F(LiteralUtilTest, CreateR3FromArray3d) { TEST_F(LiteralUtilTest, LiteralR4F32ProjectedStringifies) { // clang-format off - auto literal = LiteralUtil::CreateR4Projected<float>({ + auto literal = Literal::CreateR4Projected<float>({ {1, 2}, {1001, 1002}, {2001, 2002}, }, /*projection_p=*/1, /*projection_z=*/2); // clang-format on EXPECT_THAT(literal->shape().dimensions(), ElementsAre(1, 2, 3, 2)); - string result = LiteralUtil::ToString(*literal); + string result = literal->ToString(); const string expected = R"(f32[1,2,3,2] { { // i0=0 { // i1=0 @@ -208,7 +207,7 @@ TEST_F(LiteralUtilTest, LiteralR4F32ProjectedStringifies) { TEST_F(LiteralUtilTest, LiteralR4F32Stringifies) { EXPECT_THAT(literal_r4_2x2x3x3_dim0major_->shape().dimensions(), ElementsAre(2, 2, 3, 3)); - string result = LiteralUtil::ToString(*literal_r4_2x2x3x3_dim0major_); + string result = literal_r4_2x2x3x3_dim0major_->ToString(); const string expected = R"(f32[2,2,3,3] { { // i0=0 { // i1=0 @@ -240,14 +239,13 @@ TEST_F(LiteralUtilTest, LiteralR4F32Stringifies) { TEST_F(LiteralUtilTest, EachCellR2F32) { // clang-format off - auto literal = LiteralUtil::CreateR2<float>({ + auto literal = Literal::CreateR2<float>({ {3.1f, 4.2f}, {9.3f, 12.4f}, }); // clang-format on std::vector<std::tuple<int64, int64, string>> seen; - LiteralUtil::EachCellAsString( - *literal, + literal->EachCellAsString( [&seen](tensorflow::gtl::ArraySlice<int64> indices, const string& value) { seen.emplace_back(indices[0], indices[1], value); }); @@ -259,176 +257,161 @@ TEST_F(LiteralUtilTest, EachCellR2F32) { } TEST_F(LiteralUtilTest, ScalarEquality) { - // Test LiteralUtil::Equal with scalars. - auto f32_42 = LiteralUtil::CreateR0<float>(42.0); - auto f32_42_clone = LiteralUtil::CreateR0<float>(42.0); + // Test Literal::Equal with scalars. + auto f32_42 = Literal::CreateR0<float>(42.0); + auto f32_42_clone = Literal::CreateR0<float>(42.0); - EXPECT_TRUE(LiteralUtil::Equal(*f32_42, *f32_42)); - EXPECT_TRUE(LiteralUtil::Equal(*f32_42, *f32_42_clone)); + EXPECT_TRUE(f32_42->Equal(*f32_42)); + EXPECT_TRUE(f32_42->Equal(*f32_42_clone)); - auto f32_123 = LiteralUtil::CreateR0<float>(123.0); - EXPECT_FALSE(LiteralUtil::Equal(*f32_42, *f32_123)); + auto f32_123 = Literal::CreateR0<float>(123.0); + EXPECT_FALSE(f32_42->Equal(*f32_123)); - auto f64_42 = LiteralUtil::CreateR0<double>(42.0); - EXPECT_FALSE(LiteralUtil::Equal(*f32_42, *f64_42)); + auto f64_42 = Literal::CreateR0<double>(42.0); + EXPECT_FALSE(f32_42->Equal(*f64_42)); } TEST_F(LiteralUtilTest, NonScalarEquality) { - // Test LiteralUtil::Equal with nonscalars. 
- auto matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}); - auto matrix_clone = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}); - auto matrix_different = - LiteralUtil::CreateR2<float>({{4.0, 3.0}, {1.0, 2.0}}); - auto vector_literal = LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0, 4.0}); - auto scalar = LiteralUtil::CreateR0<float>(1.0); + // Test Literal::Equal with nonscalars. + auto matrix = Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}); + auto matrix_clone = Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}); + auto matrix_different = Literal::CreateR2<float>({{4.0, 3.0}, {1.0, 2.0}}); + auto vector_literal = Literal::CreateR1<float>({1.0, 2.0, 3.0, 4.0}); + auto scalar = Literal::CreateR0<float>(1.0); - EXPECT_TRUE(LiteralUtil::Equal(*matrix, *matrix)); - EXPECT_TRUE(LiteralUtil::Equal(*matrix, *matrix_clone)); - EXPECT_FALSE(LiteralUtil::Equal(*matrix, *matrix_different)); - EXPECT_FALSE(LiteralUtil::Equal(*matrix, *vector_literal)); - EXPECT_FALSE(LiteralUtil::Equal(*matrix, *scalar)); + EXPECT_TRUE(matrix->Equal(*matrix)); + EXPECT_TRUE(matrix->Equal(*matrix_clone)); + EXPECT_FALSE(matrix->Equal(*matrix_different)); + EXPECT_FALSE(matrix->Equal(*vector_literal)); + EXPECT_FALSE(matrix->Equal(*scalar)); } TEST_F(LiteralUtilTest, DifferentLayoutEquality) { - // Test LiteralUtil::Equal with literals which have different layouts. + // Test Literal::Equal with literals which have different layouts. auto colmajor = MakeUnique<Literal>(); *colmajor->mutable_shape() = ShapeUtil::MakeShape(F32, {2, 2}); *colmajor->mutable_shape()->mutable_layout() = LayoutUtil::MakeLayout({0, 1}); - LiteralUtil::Reserve(4, colmajor.get()); - LiteralUtil::Set<float>(colmajor.get(), {0, 0}, 1.0); - LiteralUtil::Set<float>(colmajor.get(), {0, 1}, 2.0); - LiteralUtil::Set<float>(colmajor.get(), {1, 0}, 3.0); - LiteralUtil::Set<float>(colmajor.get(), {1, 1}, 4.0); + colmajor.get()->Reserve(4); + colmajor.get()->Set<float>({0, 0}, 1.0); + colmajor.get()->Set<float>({0, 1}, 2.0); + colmajor.get()->Set<float>({1, 0}, 3.0); + colmajor.get()->Set<float>({1, 1}, 4.0); auto rowmajor = MakeUnique<Literal>(); *rowmajor->mutable_shape() = ShapeUtil::MakeShape(F32, {2, 2}); *rowmajor->mutable_shape()->mutable_layout() = LayoutUtil::MakeLayout({1, 0}); - LiteralUtil::Reserve(4, rowmajor.get()); - LiteralUtil::Set<float>(rowmajor.get(), {0, 0}, 1.0); - LiteralUtil::Set<float>(rowmajor.get(), {0, 1}, 2.0); - LiteralUtil::Set<float>(rowmajor.get(), {1, 0}, 3.0); - LiteralUtil::Set<float>(rowmajor.get(), {1, 1}, 4.0); + rowmajor.get()->Reserve(4); + rowmajor.get()->Set<float>({0, 0}, 1.0); + rowmajor.get()->Set<float>({0, 1}, 2.0); + rowmajor.get()->Set<float>({1, 0}, 3.0); + rowmajor.get()->Set<float>({1, 1}, 4.0); - EXPECT_TRUE(LiteralUtil::Equal(*rowmajor, *colmajor)); + EXPECT_TRUE(rowmajor->Equal(*colmajor)); } TEST_F(LiteralUtilTest, TupleEquality) { - // Test LiteralUtil::Equal with tuples. - auto scalar = LiteralUtil::CreateR0<float>(1.0); - auto matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}); - auto tuple1 = LiteralUtil::MakeTuple({scalar.get(), matrix.get()}); + // Test Literal::Equal with tuples. + auto scalar = Literal::CreateR0<float>(1.0); + auto matrix = Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}); + auto tuple1 = Literal::MakeTuple({scalar.get(), matrix.get()}); // Tuple with the same elements. One element is shared with the original // tuple, the other is a clone of the element in the original tuple. 
- auto scalar_clone = LiteralUtil::CreateR0<float>(1.0); - auto tuple2 = LiteralUtil::MakeTuple({scalar_clone.get(), matrix.get()}); - EXPECT_TRUE(LiteralUtil::Equal(*tuple1, *tuple2)); + auto scalar_clone = Literal::CreateR0<float>(1.0); + auto tuple2 = Literal::MakeTuple({scalar_clone.get(), matrix.get()}); + EXPECT_TRUE(tuple1->Equal(*tuple2)); // Tuple with elements reversed. - auto reversed_tuple = LiteralUtil::MakeTuple({matrix.get(), scalar.get()}); - EXPECT_FALSE(LiteralUtil::Equal(*tuple1, *reversed_tuple)); + auto reversed_tuple = Literal::MakeTuple({matrix.get(), scalar.get()}); + EXPECT_FALSE(tuple1->Equal(*reversed_tuple)); // Tuple with different value. - auto scalar_42 = LiteralUtil::CreateR0<float>(42.0); - auto different_tuple = - LiteralUtil::MakeTuple({scalar_42.get(), matrix.get()}); - EXPECT_FALSE(LiteralUtil::Equal(*tuple1, *different_tuple)); + auto scalar_42 = Literal::CreateR0<float>(42.0); + auto different_tuple = Literal::MakeTuple({scalar_42.get(), matrix.get()}); + EXPECT_FALSE(tuple1->Equal(*different_tuple)); } TEST_F(LiteralUtilTest, IsAllTuple) { - auto element1 = LiteralUtil::CreateR0<float>(0.0); - auto element2 = LiteralUtil::CreateR2<float>({{0.0, 0.0}, {0.0, 0.0}}); - auto tuple = LiteralUtil::MakeTuple({element1.get(), element1.get()}); + auto element1 = Literal::CreateR0<float>(0.0); + auto element2 = Literal::CreateR2<float>({{0.0, 0.0}, {0.0, 0.0}}); + auto tuple = Literal::MakeTuple({element1.get(), element1.get()}); // Tuples should always return false for IsAll. - EXPECT_FALSE(LiteralUtil::IsAll(*tuple, 0)); - EXPECT_FALSE(LiteralUtil::IsAll(*tuple, 1)); + EXPECT_FALSE(tuple->IsAll(0)); + EXPECT_FALSE(tuple->IsAll(1)); } TEST_F(LiteralUtilTest, IsAll) { - EXPECT_TRUE(LiteralUtil::IsAll(*LiteralUtil::CreateR0<bool>(false), 0)); - EXPECT_TRUE(LiteralUtil::IsAll(*LiteralUtil::CreateR0<bool>(true), 1)); - EXPECT_FALSE(LiteralUtil::IsAll(*LiteralUtil::CreateR0<bool>(false), 1)); - EXPECT_FALSE(LiteralUtil::IsAll(*LiteralUtil::CreateR0<bool>(false), 2)); - EXPECT_FALSE(LiteralUtil::IsAll(*LiteralUtil::CreateR0<bool>(true), 0)); - EXPECT_FALSE(LiteralUtil::IsAll(*LiteralUtil::CreateR0<bool>(true), 2)); - EXPECT_FALSE(LiteralUtil::IsAll(*LiteralUtil::CreateR0<bool>(true), -1)); + EXPECT_TRUE(Literal::CreateR0<bool>(false)->IsAll(0)); + EXPECT_TRUE(Literal::CreateR0<bool>(true)->IsAll(1)); + EXPECT_FALSE(Literal::CreateR0<bool>(false)->IsAll(1)); + EXPECT_FALSE(Literal::CreateR0<bool>(false)->IsAll(2)); + EXPECT_FALSE(Literal::CreateR0<bool>(true)->IsAll(0)); + EXPECT_FALSE(Literal::CreateR0<bool>(true)->IsAll(2)); + EXPECT_FALSE(Literal::CreateR0<bool>(true)->IsAll(-1)); // We shouldn't reinterpret int8_min as an unsigned type and then decide that // it is equal to 255. 
auto int8_min = std::numeric_limits<int8>::min(); - EXPECT_FALSE( - LiteralUtil::IsAll(*LiteralUtil::CreateR0<uint8>(255), int8_min)); + EXPECT_FALSE(Literal::CreateR0<uint8>(255)->IsAll(int8_min)); - EXPECT_TRUE(LiteralUtil::IsAll(*LiteralUtil::CreateR0<float>(42.0), 42)); - EXPECT_FALSE(LiteralUtil::IsAll(*LiteralUtil::CreateR0<float>(42.0001), 42)); + EXPECT_TRUE(Literal::CreateR0<float>(42.0)->IsAll(42)); + EXPECT_FALSE(Literal::CreateR0<float>(42.0001)->IsAll(42)); - EXPECT_TRUE( - LiteralUtil::IsAll(*LiteralUtil::CreateR1<int>({100, 100, 100}), 100)); - EXPECT_FALSE(LiteralUtil::IsAll( - *LiteralUtil::CreateR1<double>({100, 100, 100.001}), 100)); + EXPECT_TRUE(Literal::CreateR1<int>({100, 100, 100})->IsAll(100)); + EXPECT_FALSE(Literal::CreateR1<double>({100, 100, 100.001})->IsAll(100)); - EXPECT_TRUE( - LiteralUtil::IsAll(*LiteralUtil::CreateR2<uint64>({{8, 8}, {8, 8}}), 8)); - EXPECT_FALSE( - LiteralUtil::IsAll(*LiteralUtil::CreateR2<uint64>({{8, 8}, {8, 9}}), 8)); - EXPECT_FALSE( - LiteralUtil::IsAll(*LiteralUtil::CreateR2<uint64>({{9, 8}, {8, 8}}), 8)); + EXPECT_TRUE(Literal::CreateR2<uint64>({{8, 8}, {8, 8}})->IsAll(8)); + EXPECT_FALSE(Literal::CreateR2<uint64>({{8, 8}, {8, 9}})->IsAll(8)); + EXPECT_FALSE(Literal::CreateR2<uint64>({{9, 8}, {8, 8}})->IsAll(8)); half h8(8.0f); half h9(9.0f); - EXPECT_TRUE( - LiteralUtil::IsAll(*LiteralUtil::CreateR2<half>({{h8}, {h8}}), 8)); - EXPECT_FALSE( - LiteralUtil::IsAll(*LiteralUtil::CreateR2<half>({{h8}, {h9}}), 8)); - EXPECT_FALSE( - LiteralUtil::IsAll(*LiteralUtil::CreateR2<half>({{h9}, {h8}}), 8)); + EXPECT_TRUE(Literal::CreateR2<half>({{h8}, {h8}})->IsAll(8)); + EXPECT_FALSE(Literal::CreateR2<half>({{h8}, {h9}})->IsAll(8)); + EXPECT_FALSE(Literal::CreateR2<half>({{h9}, {h8}})->IsAll(8)); auto uint64_max = std::numeric_limits<uint64>::max(); - EXPECT_FALSE(LiteralUtil::IsAll( - *LiteralUtil::CreateR2<uint64>( - {{uint64_max, uint64_max}, {uint64_max, uint64_max}}), - -1)); + EXPECT_FALSE(Literal::CreateR2<uint64>( + {{uint64_max, uint64_max}, {uint64_max, uint64_max}}) + ->IsAll(-1)); } TEST_F(LiteralUtilTest, IsAllFloat) { // IsAllFloat always returns false when the literal is not floating-point. 
- EXPECT_FALSE(LiteralUtil::IsAllFloat(*LiteralUtil::CreateR0<bool>(false), 0)); - EXPECT_FALSE(LiteralUtil::IsAllFloat(*LiteralUtil::CreateR0<int8>(0), 0)); - EXPECT_FALSE(LiteralUtil::IsAllFloat(*LiteralUtil::CreateR0<uint8>(0), 0)); - EXPECT_FALSE(LiteralUtil::IsAllFloat(*LiteralUtil::CreateR0<int>(0), 0)); - - EXPECT_TRUE(LiteralUtil::IsAllFloat(*LiteralUtil::CreateR0<float>(0), 0)); - EXPECT_TRUE(LiteralUtil::IsAllFloat(*LiteralUtil::CreateR0<float>(.5), .5)); - EXPECT_TRUE(LiteralUtil::IsAllFloat(*LiteralUtil::CreateR0<float>(-.5), -.5)); + EXPECT_FALSE(Literal::CreateR0<bool>(false)->IsAllFloat(0)); + EXPECT_FALSE(Literal::CreateR0<int8>(0)->IsAllFloat(0)); + EXPECT_FALSE(Literal::CreateR0<uint8>(0)->IsAllFloat(0)); + EXPECT_FALSE(Literal::CreateR0<int>(0)->IsAllFloat(0)); + + EXPECT_TRUE(Literal::CreateR0<float>(0)->IsAllFloat(0)); + EXPECT_TRUE(Literal::CreateR0<float>(.5)->IsAllFloat(.5)); + EXPECT_TRUE(Literal::CreateR0<float>(-.5)->IsAllFloat(-.5)); + EXPECT_FALSE(Literal::CreateR0<float>(-.5)->IsAllFloat(-.49)); EXPECT_FALSE( - LiteralUtil::IsAllFloat(*LiteralUtil::CreateR0<float>(-.5), -.49)); - EXPECT_FALSE(LiteralUtil::IsAllFloat( - *LiteralUtil::CreateR2<float>({{0, 0, 0}, {0, .1, 0}}), 0)); - EXPECT_TRUE(LiteralUtil::IsAllFloat( - *LiteralUtil::CreateR2<float>({{.5, .5, .5}, {.5, .5, .5}}), .5)); - - EXPECT_TRUE(LiteralUtil::IsAllFloat(*LiteralUtil::CreateR0<double>(0), 0)); - EXPECT_TRUE(LiteralUtil::IsAllFloat(*LiteralUtil::CreateR0<double>(.5), .5)); + Literal::CreateR2<float>({{0, 0, 0}, {0, .1, 0}})->IsAllFloat(0)); EXPECT_TRUE( - LiteralUtil::IsAllFloat(*LiteralUtil::CreateR0<double>(-.5), -.5)); + Literal::CreateR2<float>({{.5, .5, .5}, {.5, .5, .5}})->IsAllFloat(.5)); + + EXPECT_TRUE(Literal::CreateR0<double>(0)->IsAllFloat(0)); + EXPECT_TRUE(Literal::CreateR0<double>(.5)->IsAllFloat(.5)); + EXPECT_TRUE(Literal::CreateR0<double>(-.5)->IsAllFloat(-.5)); + EXPECT_FALSE(Literal::CreateR0<double>(-.5)->IsAllFloat(-.49)); EXPECT_FALSE( - LiteralUtil::IsAllFloat(*LiteralUtil::CreateR0<double>(-.5), -.49)); - EXPECT_FALSE(LiteralUtil::IsAllFloat( - *LiteralUtil::CreateR2<double>({{0, 0, 0}, {0, .1, 0}}), 0)); + Literal::CreateR2<double>({{0, 0, 0}, {0, .1, 0}})->IsAllFloat(0)); } TEST_F(LiteralUtilTest, IsZero) { - auto scalar_zero = LiteralUtil::CreateR0<float>(0.0f); - auto scalar_one = LiteralUtil::CreateR0<float>(1.0f); - EXPECT_TRUE(LiteralUtil::IsZero(*scalar_zero, {})); - EXPECT_FALSE(LiteralUtil::IsZero(*scalar_one, {})); + auto scalar_zero = Literal::CreateR0<float>(0.0f); + auto scalar_one = Literal::CreateR0<float>(1.0f); + EXPECT_TRUE(scalar_zero->IsZero({})); + EXPECT_FALSE(scalar_one->IsZero({})); - auto array = LiteralUtil::CreateR2<uint32>({{1, 2, 0, 3}, {1, 0, 1, 2}}); - EXPECT_FALSE(LiteralUtil::IsZero(*array, {0, 1})); - EXPECT_TRUE(LiteralUtil::IsZero(*array, {0, 2})); - EXPECT_TRUE(LiteralUtil::IsZero(*array, {1, 1})); - EXPECT_FALSE(LiteralUtil::IsZero(*array, {1, 2})); + auto array = Literal::CreateR2<uint32>({{1, 2, 0, 3}, {1, 0, 1, 2}}); + EXPECT_FALSE(array->IsZero({0, 1})); + EXPECT_TRUE(array->IsZero({0, 2})); + EXPECT_TRUE(array->IsZero({1, 1})); + EXPECT_FALSE(array->IsZero({1, 2})); } template <typename T> @@ -440,127 +423,122 @@ TYPED_TEST_CASE(LiteralUtilTestTemplated, TestedTypes); TYPED_TEST(LiteralUtilTestTemplated, Relayout2x2) { // Make a non-integer for floating point types. 
TypeParam half = TypeParam(1) / TypeParam(2); - auto data = LiteralUtil::CreateR2<TypeParam>({{half, 2}, {3, 4}}); + auto data = Literal::CreateR2<TypeParam>({{half, 2}, {3, 4}}); const Layout layout01 = LayoutUtil::MakeLayout({0, 1}); const Layout layout10 = LayoutUtil::MakeLayout({1, 0}); - auto data01 = LiteralUtil::Relayout(*data, layout01); + auto data01 = data->Relayout(layout01); EXPECT_TRUE(LayoutUtil::Equal(data01->shape().layout(), layout01)); - EXPECT_TRUE(LiteralUtil::Equal(*data, *data01)); + EXPECT_TRUE(data->Equal(*data01)); - auto data10 = LiteralUtil::Relayout(*data, layout10); + auto data10 = data->Relayout(layout10); EXPECT_TRUE(LayoutUtil::Equal(data10->shape().layout(), layout10)); - EXPECT_TRUE(LiteralUtil::Equal(*data, *data10)); + EXPECT_TRUE(data->Equal(*data10)); } TEST_F(LiteralUtilTest, ReshapeR0) { - auto original = LiteralUtil::CreateR0<float>(1.7f); - auto reshape = - LiteralUtil::Reshape(*original, /*shape=*/{}).ConsumeValueOrDie(); - EXPECT_TRUE(LiteralUtil::Equal(*original, *reshape)); + auto original = Literal::CreateR0<float>(1.7f); + auto reshape = original->Reshape(/*shape=*/{}).ConsumeValueOrDie(); + EXPECT_TRUE(original->Equal(*reshape)); } TEST_F(LiteralUtilTest, ReshapeR4) { // clang-format off // F32[1x3x2x4] - auto original = LiteralUtil::CreateR4WithLayout<float>({{ + auto original = Literal::CreateR4WithLayout<float>({{ {{10, 11, 12, 13}, {14, 15, 16, 17}}, {{18, 19, 20, 21}, {22, 23, 24, 25}}, {{26, 27, 28, 29}, {30, 31, 32, 33}}, }}, layout_r4_dim0major_); // F32[1x3x4x2] - auto expected = LiteralUtil::CreateR3WithLayout<float>({ + auto expected = Literal::CreateR3WithLayout<float>({ {{10, 11}, {12, 13}, {14, 15}, {16, 17}}, {{18, 19}, {20, 21}, {22, 23}, {24, 25}}, {{26, 27}, {28, 29}, {30, 31}, {32, 33}}, }, layout_r3_dim0major_); // clang-format on - auto reshape = LiteralUtil::Reshape(*original, {3, 4, 2}).ConsumeValueOrDie(); + auto reshape = original->Reshape({3, 4, 2}).ConsumeValueOrDie(); - EXPECT_TRUE(LiteralUtil::Equal(*expected, *reshape)); + EXPECT_TRUE(expected->Equal(*reshape)); } TEST_F(LiteralUtilTest, ReshapeR4Dim0Minor) { // clang-format off // F32[1x3x2x4] - auto original = LiteralUtil::CreateR4WithLayout<float>({{ + auto original = Literal::CreateR4WithLayout<float>({{ {{10, 11, 12, 13}, {14, 15, 16, 17}}, {{18, 19, 20, 21}, {22, 23, 24, 25}}, {{26, 27, 28, 29}, {30, 31, 32, 33}}, }}, layout_r4_dim0minor_); // F32[1x3x4x2] - auto expected = LiteralUtil::CreateR3WithLayout<float>({ + auto expected = Literal::CreateR3WithLayout<float>({ {{10, 11}, {12, 13}, {14, 15}, {16, 17}}, {{18, 19}, {20, 21}, {22, 23}, {24, 25}}, {{26, 27}, {28, 29}, {30, 31}, {32, 33}}, }, layout_r3_dim0major_); // clang-format on - auto reshape = LiteralUtil::Reshape(*original, {3, 4, 2}).ConsumeValueOrDie(); + auto reshape = original->Reshape({3, 4, 2}).ConsumeValueOrDie(); - EXPECT_TRUE(LiteralUtil::Equal(*expected, *reshape)); + EXPECT_TRUE(expected->Equal(*reshape)); } TEST_F(LiteralUtilTest, TransposeR0) { - auto original = LiteralUtil::CreateR0<float>(1.7f); - auto reshape = LiteralUtil::Transpose(*original, /*permutation=*/{}); - EXPECT_TRUE(LiteralUtil::Equal(*original, *reshape)); + auto original = Literal::CreateR0<float>(1.7f); + auto reshape = original->Transpose(/*permutation=*/{}); + EXPECT_TRUE(original->Equal(*reshape)); } TEST_F(LiteralUtilTest, TransposeR4) { // clang-format off // F32[1x3x2x4] - auto original = LiteralUtil::CreateR4<float>({{ + auto original = Literal::CreateR4<float>({{ {{10, 11, 12, 13}, {14, 15, 16, 17}}, {{18, 
19, 20, 21}, {22, 23, 24, 25}}, {{26, 27, 28, 29}, {30, 31, 32, 33}}, }}); // clang-format on - auto reshape = - LiteralUtil::Transpose(*original, /*permutation=*/{2, 3, 0, 1}); - - LiteralUtil::EachCell<float>( - *reshape, [&](tensorflow::gtl::ArraySlice<int64> indices, float value) { - EXPECT_EQ(value, - LiteralUtil::Get<float>(*original, {indices[2], indices[3], - indices[0], indices[1]})); + auto reshape = original->Transpose(/*permutation=*/{2, 3, 0, 1}); + + reshape->EachCell<float>( + [&](tensorflow::gtl::ArraySlice<int64> indices, float value) { + EXPECT_EQ(value, original->Get<float>( + {indices[2], indices[3], indices[0], indices[1]})); }); } TEST_F(LiteralUtilTest, TestR4RelayoutEquivalence) { // Tests that using Relayout on an array is equivalent to creating it in the // target layout in the first place. - auto dim0minor_relaid_to_dim0major = LiteralUtil::Relayout( - *literal_r4_2x2x3x3_dim0minor_, layout_r4_dim0major_); - EXPECT_TRUE(LiteralUtil::Equal(*literal_r4_2x2x3x3_dim0major_, - *dim0minor_relaid_to_dim0major)); + auto dim0minor_relaid_to_dim0major = + literal_r4_2x2x3x3_dim0minor_->Relayout(layout_r4_dim0major_); + EXPECT_TRUE( + literal_r4_2x2x3x3_dim0major_->Equal(*dim0minor_relaid_to_dim0major)); - auto dim0major_relaid_to_dim0minor = LiteralUtil::Relayout( - *literal_r4_2x2x3x3_dim0major_, layout_r4_dim0minor_); - EXPECT_TRUE(LiteralUtil::Equal(*literal_r4_2x2x3x3_dim0minor_, - *dim0major_relaid_to_dim0minor)); + auto dim0major_relaid_to_dim0minor = + literal_r4_2x2x3x3_dim0major_->Relayout(layout_r4_dim0minor_); + EXPECT_TRUE( + literal_r4_2x2x3x3_dim0minor_->Equal(*dim0major_relaid_to_dim0minor)); } TEST_F(LiteralUtilTest, TestR2LinearLayout) { // Test expected memory layout of R2 dim0-minor (column-major) literal. - auto mat_dim0minor = LiteralUtil::CreateR2WithLayout<int>( - {{1, 2, 3}, {4, 5, 6}}, layout_r2_dim0minor_); + auto mat_dim0minor = Literal::CreateR2WithLayout<int>({{1, 2, 3}, {4, 5, 6}}, + layout_r2_dim0minor_); EXPECT_EQ(mat_dim0minor->s32s_size(), 6); EXPECT_THAT(mat_dim0minor->s32s(), ElementsAre(1, 4, 2, 5, 3, 6)); // Test expected memory layout when using Relayout to row major. - auto relaid_mat_to_dim0major = - LiteralUtil::Relayout(*mat_dim0minor, layout_r2_dim0major_); + auto relaid_mat_to_dim0major = mat_dim0minor->Relayout(layout_r2_dim0major_); EXPECT_THAT(relaid_mat_to_dim0major->s32s(), ElementsAre(1, 2, 3, 4, 5, 6)); // Test expected memory layout of R2 created with dim0-major (row-major). - auto mat_dim0major = LiteralUtil::CreateR2WithLayout<int>( - {{1, 2, 3}, {4, 5, 6}}, layout_r2_dim0major_); + auto mat_dim0major = Literal::CreateR2WithLayout<int>({{1, 2, 3}, {4, 5, 6}}, + layout_r2_dim0major_); EXPECT_EQ(mat_dim0major->s32s_size(), 6); EXPECT_THAT(mat_dim0major->s32s(), ElementsAre(1, 2, 3, 4, 5, 6)); // Test expected memory layout when using Relayout to column major. 
- auto relaid_mat_to_dim0minor = - LiteralUtil::Relayout(*mat_dim0major, layout_r2_dim0minor_); + auto relaid_mat_to_dim0minor = mat_dim0major->Relayout(layout_r2_dim0minor_); EXPECT_THAT(relaid_mat_to_dim0minor->s32s(), ElementsAre(1, 4, 2, 5, 3, 6)); } @@ -578,8 +556,8 @@ TEST_F(LiteralUtilTest, TestR3LinearLayout) { {10, 11, 12}, }, }); // clang-format on - auto lit_dim0minor = LiteralUtil::CreateR3FromArray3DWithLayout<int>( - arr3d, layout_r3_dim0minor_); + auto lit_dim0minor = + Literal::CreateR3FromArray3DWithLayout<int>(arr3d, layout_r3_dim0minor_); EXPECT_EQ(lit_dim0minor->s32s_size(), 12); std::vector<int> expected_dim0minor{1, 7, 4, 10, 2, 8, 5, 11, 3, 9, 6, 12}; @@ -587,122 +565,120 @@ TEST_F(LiteralUtilTest, TestR3LinearLayout) { testing::ElementsAreArray(expected_dim0minor)); // Test expected memory layout when using Relayout to row major. - auto relaid_lit_to_dim0major = - LiteralUtil::Relayout(*lit_dim0minor, layout_r3_dim0major_); + auto relaid_lit_to_dim0major = lit_dim0minor->Relayout(layout_r3_dim0major_); std::vector<int> expected_dim0major{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}; EXPECT_THAT(relaid_lit_to_dim0major->s32s(), testing::ElementsAreArray(expected_dim0major)); // Test expected memory layout of R3 created with dim0-major (row-major). - auto lit_dim0major = LiteralUtil::CreateR3FromArray3DWithLayout<int>( - arr3d, layout_r3_dim0major_); + auto lit_dim0major = + Literal::CreateR3FromArray3DWithLayout<int>(arr3d, layout_r3_dim0major_); EXPECT_EQ(lit_dim0major->s32s_size(), 12); EXPECT_THAT(lit_dim0major->s32s(), testing::ElementsAreArray(expected_dim0major)); // Test expected memory layout when using Relayout to column major. - auto relaid_lit_to_dim0minor = - LiteralUtil::Relayout(*lit_dim0major, layout_r3_dim0minor_); + auto relaid_lit_to_dim0minor = lit_dim0major->Relayout(layout_r3_dim0minor_); EXPECT_THAT(relaid_lit_to_dim0minor->s32s(), testing::ElementsAreArray(expected_dim0minor)); } TEST_F(LiteralUtilTest, SliceR0S32) { - auto input = LiteralUtil::CreateR0<int32>(1); - auto result = LiteralUtil::Slice(*input, {}, {}); - EXPECT_TRUE(LiteralUtil::Equal(*input, *result)); + auto input = Literal::CreateR0<int32>(1); + auto result = input->Slice({}, {}); + EXPECT_TRUE(input->Equal(*result)); } TEST_F(LiteralUtilTest, SliceR1F32) { - auto input = LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0, 4.0, 5.0}); - auto result = LiteralUtil::Slice(*input, {3}, {4}); - auto expected = LiteralUtil::CreateR1<float>({4.0}); - EXPECT_TRUE(LiteralUtil::Equal(*expected, *result)); + auto input = Literal::CreateR1<float>({1.0, 2.0, 3.0, 4.0, 5.0}); + auto result = input->Slice({3}, {4}); + auto expected = Literal::CreateR1<float>({4.0}); + EXPECT_TRUE(expected->Equal(*result)); } TEST_F(LiteralUtilTest, SliceR2U32) { - auto input_3x4 = LiteralUtil::CreateR2<uint32>( - {{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}}); - auto result = LiteralUtil::Slice(*input_3x4, {0, 2}, {2, 4}); - auto expected = LiteralUtil::CreateR2<uint32>({{3, 4}, {7, 8}}); - EXPECT_TRUE(LiteralUtil::Equal(*expected, *result)); + auto input_3x4 = + Literal::CreateR2<uint32>({{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}}); + auto result = input_3x4->Slice({0, 2}, {2, 4}); + auto expected = Literal::CreateR2<uint32>({{3, 4}, {7, 8}}); + EXPECT_TRUE(expected->Equal(*result)); } TEST_F(LiteralUtilTest, SliceR3U32Full) { - auto input_2x3x2 = LiteralUtil::CreateR3<uint32>( + auto input_2x3x2 = Literal::CreateR3<uint32>( {{{1, 2}, {3, 4}, {5, 6}}, {{7, 8}, {9, 10}, {11, 12}}}); - auto result = 
LiteralUtil::Slice(*input_2x3x2, {0, 0, 0}, {2, 3, 2}); - EXPECT_TRUE(LiteralUtil::Equal(*input_2x3x2, *result)); + auto result = input_2x3x2->Slice({0, 0, 0}, {2, 3, 2}); + EXPECT_TRUE(input_2x3x2->Equal(*result)); } TEST_F(LiteralUtilTest, PopulateR1S64) { Literal output; - LiteralUtil::PopulateR1<int64>({77}, &output); - auto expected = LiteralUtil::CreateR1<int64>({77}); - EXPECT_TRUE(LiteralUtil::Equal(output, *expected)); + output.PopulateR1<int64>({77}); + auto expected = Literal::CreateR1<int64>({77}); + EXPECT_TRUE(output.Equal(*expected)); } TEST_F(LiteralUtilTest, PopulateR2U64) { Literal output; - LiteralUtil::PopulateR1<uint64>({{77, 88}}, &output); - auto expected = LiteralUtil::CreateR1<uint64>({{77, 88}}); - EXPECT_TRUE(LiteralUtil::Equal(output, *expected)); + output.PopulateR1<uint64>({{77, 88}}); + auto expected = Literal::CreateR1<uint64>({{77, 88}}); + EXPECT_TRUE(output.Equal(*expected)); } TEST_F(LiteralUtilTest, PopulateWithValueR0F32) { Literal output; - LiteralUtil::PopulateWithValue<float>(2.5f, {}, &output); - auto expected = LiteralUtil::CreateR0<float>(2.5f); - EXPECT_TRUE(LiteralUtil::Equal(output, *expected)); + output.PopulateWithValue<float>(2.5f, {}); + auto expected = Literal::CreateR0<float>(2.5f); + EXPECT_TRUE(output.Equal(*expected)); } TEST_F(LiteralUtilTest, PopulateWithValueR1S64) { Literal output; - LiteralUtil::PopulateWithValue<int64>(-7, {3}, &output); - auto expected = LiteralUtil::CreateR1<int64>({-7, -7, -7}); - EXPECT_TRUE(LiteralUtil::Equal(output, *expected)); + output.PopulateWithValue<int64>(-7, {3}); + auto expected = Literal::CreateR1<int64>({-7, -7, -7}); + EXPECT_TRUE(output.Equal(*expected)); } TEST_F(LiteralUtilTest, PopulateWithValueR2U64) { Literal output; - LiteralUtil::PopulateWithValue<uint64>(42, {2, 2}, &output); - auto expected = LiteralUtil::CreateR2<uint64>({{42, 42}, {42, 42}}); - EXPECT_TRUE(LiteralUtil::Equal(output, *expected)); + output.PopulateWithValue<uint64>(42, {2, 2}); + auto expected = Literal::CreateR2<uint64>({{42, 42}, {42, 42}}); + EXPECT_TRUE(output.Equal(*expected)); } TEST_F(LiteralUtilTest, PopulateWithValueR0F16) { Literal output; half h(0.25f); - LiteralUtil::PopulateWithValue<half>(h, {}, &output); - auto expected = LiteralUtil::CreateR0<half>(h); - EXPECT_TRUE(LiteralUtil::Equal(output, *expected)); + output.PopulateWithValue<half>(h, {}); + auto expected = Literal::CreateR0<half>(h); + EXPECT_TRUE(output.Equal(*expected)); } TEST_F(LiteralUtilTest, PopulateWithValueR1F16) { Literal output; half h(0.5f); - LiteralUtil::PopulateWithValue<half>(h, {3}, &output); - auto expected = LiteralUtil::CreateR1<half>({h, h, h}); - EXPECT_TRUE(LiteralUtil::Equal(output, *expected)); + output.PopulateWithValue<half>(h, {3}); + auto expected = Literal::CreateR1<half>({h, h, h}); + EXPECT_TRUE(output.Equal(*expected)); } TEST_F(LiteralUtilTest, PopulateWithValueR2F16) { Literal output; half h(2.0f); - LiteralUtil::PopulateWithValue<half>(h, {2, 2}, &output); - auto expected = LiteralUtil::CreateR2<half>({{h, h}, {h, h}}); - EXPECT_TRUE(LiteralUtil::Equal(output, *expected)); + output.PopulateWithValue<half>(h, {2, 2}); + auto expected = Literal::CreateR2<half>({{h, h}, {h, h}}); + EXPECT_TRUE(output.Equal(*expected)); } TEST_F(LiteralUtilTest, ReplicateR2U32) { - auto input = LiteralUtil::CreateR2<uint32>( - {{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}}); - auto output = LiteralUtil::Replicate<uint32>(*input, 3); - auto expected = LiteralUtil::CreateR3<uint32>( + auto input = + Literal::CreateR2<uint32>({{1, 2, 
3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}}); + auto output = input->Replicate<uint32>(3); + auto expected = Literal::CreateR3<uint32>( {{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}}, {{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}}, {{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}}}); - EXPECT_TRUE(LiteralUtil::Equal(*output, *expected)); + EXPECT_TRUE(output->Equal(*expected)); } TEST_F(LiteralUtilTest, Copy) { @@ -712,13 +688,13 @@ TEST_F(LiteralUtilTest, Copy) { for (const auto& layout : layouts) { Shape shape = ShapeUtil::MakeShapeWithLayout( primitive_util::NativeToPrimitiveType<uint32>(), dimensions, layout); - auto blank = LiteralUtil::CreateFromShape(shape); - auto source = LiteralUtil::CreateFromShape(shape); + auto blank = Literal::CreateFromShape(shape); + auto source = Literal::CreateFromShape(shape); const int64 zero_base[] = {0, 0, 0, 0}; const int64 step[] = {1, 1, 1, 1}; uint32 seqnr = 0; auto init_proc = [&](const std::vector<int64>& indexes) { - LiteralUtil::Set(source.get(), indexes, ++seqnr); + source.get()->Set(indexes, ++seqnr); return true; }; @@ -729,8 +705,7 @@ TEST_F(LiteralUtilTest, Copy) { const int64 dest_base[] = {6, 4, 12, 2}; const int64 copy_size[] = {7, 8, 11, 9}; - TF_EXPECT_OK(LiteralUtil::Copy(*source, src_base, blank.get(), dest_base, - copy_size)); + TF_EXPECT_OK(blank.get()->Copy(*source, src_base, dest_base, copy_size)); std::vector<int64> source_indexes(TF_ARRAYSIZE(dimensions), 0); std::vector<int64> blank_indexes(TF_ARRAYSIZE(dimensions), 0); bool matched = true; @@ -741,9 +716,8 @@ TEST_F(LiteralUtilTest, Copy) { std::copy(indexes.begin(), indexes.end(), blank_indexes.begin()); std::transform(blank_indexes.begin(), blank_indexes.end(), dest_base, blank_indexes.begin(), std::plus<int64>()); - auto bval = LiteralUtil::Get<uint32>(*blank, blank_indexes); - matched = (bval != 0 && - bval == LiteralUtil::Get<uint32>(*source, source_indexes)); + auto bval = blank->Get<uint32>(blank_indexes); + matched = (bval != 0 && bval == source->Get<uint32>(source_indexes)); return matched; }; ShapeUtil::ForEachIndex(source->shape(), zero_base, copy_size, step, @@ -753,25 +727,25 @@ TEST_F(LiteralUtilTest, Copy) { } TEST_F(LiteralUtilTest, CopyScalars) { - auto zero = LiteralUtil::CreateR0<uint32>(0); - auto nine = LiteralUtil::CreateR0<uint32>(9); - TF_EXPECT_OK(LiteralUtil::Copy(*nine, {}, zero.get(), {}, {})); - EXPECT_TRUE(LiteralUtil::Equal(*zero, *nine)); + auto zero = Literal::CreateR0<uint32>(0); + auto nine = Literal::CreateR0<uint32>(9); + TF_EXPECT_OK(zero.get()->Copy(*nine, {}, {}, {})); + EXPECT_TRUE(zero->Equal(*nine)); - auto vect = LiteralUtil::CreateR1<uint32>({3, 4, 9, 12, 5, 17, 21}); - TF_EXPECT_OK(LiteralUtil::Copy(*vect, {5}, zero.get(), {}, {})); - EXPECT_EQ(LiteralUtil::Get<uint32>(*zero, {}), 17); - TF_EXPECT_OK(LiteralUtil::Copy(*zero, {}, vect.get(), {4}, {})); - EXPECT_EQ(LiteralUtil::Get<uint32>(*vect, {4}), 17); + auto vect = Literal::CreateR1<uint32>({3, 4, 9, 12, 5, 17, 21}); + TF_EXPECT_OK(zero.get()->Copy(*vect, {5}, {}, {})); + EXPECT_EQ(zero->Get<uint32>({}), 17); + TF_EXPECT_OK(vect.get()->Copy(*zero, {}, {4}, {})); + EXPECT_EQ(vect->Get<uint32>({4}), 17); } TEST_F(LiteralUtilTest, F16) { // Verify that the internal data views are consistent and that they // are in little endian format // TODO - modify if we make the data format machine endianess dependent - auto m1 = LiteralUtil::CreateFromShape(ShapeUtil::MakeShape(F16, {2, 2})); + auto m1 = Literal::CreateFromShape(ShapeUtil::MakeShape(F16, {2, 2})); Literal* l1 = m1.get(); - const 
char* d1 = static_cast<const char*>(LiteralUtil::InternalData(*l1)); + const char* d1 = static_cast<const char*>(l1->InternalData()); EXPECT_EQ(d1[0], 0); EXPECT_EQ(d1[1], 0); EXPECT_EQ(d1[2], 0); @@ -780,14 +754,13 @@ TEST_F(LiteralUtilTest, F16) { EXPECT_EQ(d1[5], 0); EXPECT_EQ(d1[6], 0); EXPECT_EQ(d1[7], 0); - EXPECT_EQ(LiteralUtil::InternalData(*l1), - LiteralUtil::MutableInternalData(l1)); + EXPECT_EQ(l1->InternalData(), l1->MutableInternalData()); half h1(1.0f); half h2(2.0f); - auto m2 = LiteralUtil::CreateR2<half>({{h1, h2}, {h2, h1}}); + auto m2 = Literal::CreateR2<half>({{h1, h2}, {h2, h1}}); Literal* l2 = m2.get(); - const char* d2 = static_cast<const char*>(LiteralUtil::InternalData(*l2)); + const char* d2 = static_cast<const char*>(l2->InternalData()); EXPECT_EQ(d2[0], 0); EXPECT_EQ(d2[1], 0x3C); EXPECT_EQ(d2[2], 0); @@ -796,8 +769,7 @@ TEST_F(LiteralUtilTest, F16) { EXPECT_EQ(d2[5], 0x40); EXPECT_EQ(d2[6], 0); EXPECT_EQ(d2[7], 0x3C); - EXPECT_EQ(LiteralUtil::InternalData(*l2), - LiteralUtil::MutableInternalData(l2)); + EXPECT_EQ(l2->InternalData(), l2->MutableInternalData()); } TEST_F(LiteralUtilTest, Populate) { @@ -818,19 +790,19 @@ TEST_F(LiteralUtilTest, Populate) { Shape shape = ShapeUtil::MakeShapeWithLayout( primitive_util::NativeToPrimitiveType<uint32>(), data.dimensions, data.layout); - auto literal = LiteralUtil::CreateFromShape(shape); + auto literal = Literal::CreateFromShape(shape); auto generator = [&](tensorflow::gtl::ArraySlice<int64> indexes) -> uint32 { // Offsets from linear index just to avoid R0 literals to be initialized // with zero. - return LiteralUtil::LinearIndex(*literal, indexes) + 17; + return literal->LinearIndex(indexes) + 17; }; - TF_EXPECT_OK(LiteralUtil::Populate<uint32>(literal.get(), generator)); + TF_EXPECT_OK(literal.get()->Populate<uint32>(generator)); std::vector<int64> zero_base(data.dimensions.size(), 0); std::vector<int64> step(data.dimensions.size(), 1); bool matched = true; auto check_function = [&](const std::vector<int64>& indexes) { - auto value = LiteralUtil::Get<uint32>(*literal, indexes); + auto value = literal->Get<uint32>(indexes); matched = matched && (value == generator(indexes)); return matched; }; @@ -842,20 +814,20 @@ TEST_F(LiteralUtilTest, Populate) { TEST_F(LiteralUtilTest, ConvertR4) { // clang-format off - auto original = LiteralUtil::CreateR4WithLayout<int8>({{ + auto original = Literal::CreateR4WithLayout<int8>({{ {{10, 11, 12, 13}, {14, 15, 16, 17}}, {{18, 19, 20, 21}, {22, 23, 24, 25}}, {{26, 27, 28, 29}, {30, 31, 32, 33}}, }}, layout_r4_dim0major_); - auto expected = LiteralUtil::CreateR4WithLayout<uint32>({{ + auto expected = Literal::CreateR4WithLayout<uint32>({{ {{10, 11, 12, 13}, {14, 15, 16, 17}}, {{18, 19, 20, 21}, {22, 23, 24, 25}}, {{26, 27, 28, 29}, {30, 31, 32, 33}}, }}, layout_r4_dim0major_); // clang-format on - auto converted = LiteralUtil::Convert<int8, uint32>(*original); + auto converted = original->Convert<int8, uint32>(); - EXPECT_TRUE(LiteralUtil::Equal(*expected, *converted)); + EXPECT_TRUE(expected->Equal(*converted)); } TEST_F(LiteralUtilTest, CopyFromProto_Bool) { diff --git a/tensorflow/compiler/xla/packed_literal_reader.cc b/tensorflow/compiler/xla/packed_literal_reader.cc index d488830a6c..1187079906 100644 --- a/tensorflow/compiler/xla/packed_literal_reader.cc +++ b/tensorflow/compiler/xla/packed_literal_reader.cc @@ -58,8 +58,7 @@ StatusOr<std::unique_ptr<Literal>> PackedLiteralReader::Read( } int64 elements = ShapeUtil::ElementsIn(shape); - LiteralUtil::Resize(elements, 
std::numeric_limits<float>::quiet_NaN(), - result.get()); + result.get()->Resize(elements, std::numeric_limits<float>::quiet_NaN()); std::vector<float>* field = result->mutable_f32s(); char* data = tensorflow::bit_cast<char*>(field->data()); uint64 bytes = elements * sizeof(float); diff --git a/tensorflow/compiler/xla/reference_util_test.cc b/tensorflow/compiler/xla/reference_util_test.cc index f839ac019d..215f220258 100644 --- a/tensorflow/compiler/xla/reference_util_test.cc +++ b/tensorflow/compiler/xla/reference_util_test.cc @@ -52,7 +52,7 @@ class ReferenceUtilTest : public ::testing::Test { TEST_F(ReferenceUtilTest, TransposeArray2D) { auto result = ReferenceUtil::TransposeArray2D(*matrix_); - auto actual_literal = LiteralUtil::CreateR2FromArray2D(*result); + auto actual_literal = Literal::CreateR2FromArray2D(*result); LiteralTestUtil::ExpectR2Near<float>({{1.f, 4.f}, {2.f, 5.f}, {3.f, 6.f}}, *actual_literal, ErrorSpec(0.0001)); } @@ -62,7 +62,7 @@ TEST_F(ReferenceUtilTest, MatmulArray2D) { {7.f, 8.f}, {9.f, 10.f}, {11.f, 12.f}, }); auto result = ReferenceUtil::MatmulArray2D(*matrix_, rhs); - auto actual_literal = LiteralUtil::CreateR2FromArray2D(*result); + auto actual_literal = Literal::CreateR2FromArray2D(*result); LiteralTestUtil::ExpectR2Near<float>({{58.f, 64.f}, {139.f, 154.f}}, *actual_literal, ErrorSpec(0.0001)); } @@ -70,7 +70,7 @@ TEST_F(ReferenceUtilTest, MatmulArray2D) { TEST_F(ReferenceUtilTest, ReduceToColArray2D) { auto add = [](float lhs, float rhs) { return lhs + rhs; }; auto result = ReferenceUtil::ReduceToColArray2D(*matrix_, 0.0f, add); - auto actual_literal = LiteralUtil::CreateR1<float>(*result); + auto actual_literal = Literal::CreateR1<float>(*result); LiteralTestUtil::ExpectR1Near<float>({6.f, 15.f}, *actual_literal, ErrorSpec(0.0001)); } @@ -78,7 +78,7 @@ TEST_F(ReferenceUtilTest, ReduceToColArray2D) { TEST_F(ReferenceUtilTest, ReduceToRowArray2D) { auto add = [](float lhs, float rhs) { return lhs + rhs; }; auto result = ReferenceUtil::ReduceToRowArray2D(*matrix_, 0.0f, add); - auto actual_literal = LiteralUtil::CreateR1<float>(*result); + auto actual_literal = Literal::CreateR1<float>(*result); LiteralTestUtil::ExpectR1Near<float>({5.f, 7.f, 9.f}, *actual_literal, ErrorSpec(0.0001)); } @@ -86,7 +86,7 @@ TEST_F(ReferenceUtilTest, ReduceToRowArray2D) { TEST_F(ReferenceUtilTest, MapArray2D) { auto identity = [](float value) { return log(exp(value)); }; auto result = ReferenceUtil::MapArray2D(*matrix_, identity); - auto actual_literal = LiteralUtil::CreateR2FromArray2D(*result); + auto actual_literal = Literal::CreateR2FromArray2D(*result); LiteralTestUtil::ExpectR2NearArray2D(*matrix_, *actual_literal, ErrorSpec(0.0001)); } @@ -96,7 +96,7 @@ TEST_F(ReferenceUtilTest, MapWithIndexArray2D) { return value + row + col; }; auto result = ReferenceUtil::MapWithIndexArray2D(*matrix_, add_index); - auto actual_literal = LiteralUtil::CreateR2FromArray2D(*result); + auto actual_literal = Literal::CreateR2FromArray2D(*result); LiteralTestUtil::ExpectR2Near<float>({{1.f, 3.f, 5.f}, {5.f, 7.f, 9.f}}, *actual_literal, ErrorSpec(0.0001)); } @@ -107,7 +107,7 @@ TEST_F(ReferenceUtilTest, MapArray4D) { input->FillWithMultiples(1.0f); auto multiply_by_two = [](float value) { return 2 * value; }; auto result = ReferenceUtil::MapArray4D(*input, multiply_by_two); - auto actual_literal = LiteralUtil::CreateR4FromArray4D(*result); + auto actual_literal = Literal::CreateR4FromArray4D(*result); Array4D<float> expected(/*planes=*/2, /*depth=*/3, /*height=*/4, /*width=*/5); 
expected.FillWithMultiples(2.0f); @@ -124,7 +124,7 @@ TEST_F(ReferenceUtilTest, MapWithIndexArray4D) { return value - (3 * 4 * 5 * plane + 4 * 5 * depth + 5 * height + width); }; auto result = ReferenceUtil::MapWithIndexArray4D(*input, subtract_index); - auto actual_literal = LiteralUtil::CreateR4FromArray4D(*result); + auto actual_literal = Literal::CreateR4FromArray4D(*result); Array4D<float> expected(/*planes=*/2, /*depth=*/3, /*height=*/4, /*width=*/5); expected.Fill(0.0f); @@ -161,7 +161,7 @@ TEST_F(ReferenceUtilTest, ConvWithSamePadding) { })); // clang-format on - auto actual_literal = LiteralUtil::CreateR4FromArray4D(*actual); + auto actual_literal = Literal::CreateR4FromArray4D(*actual); LiteralTestUtil::ExpectR4NearArray4D<float>(expected, *actual_literal, ErrorSpec(0.0001)); @@ -195,7 +195,7 @@ TEST_F(ReferenceUtilTest, ConvWithValidPadding) { })); // clang-format on - auto actual_literal = LiteralUtil::CreateR4FromArray4D(*actual); + auto actual_literal = Literal::CreateR4FromArray4D(*actual); LiteralTestUtil::ExpectR4NearArray4D<float>(expected, *actual_literal, ErrorSpec(0.0001)); @@ -247,7 +247,7 @@ TEST_F(ReferenceUtilTest, ConvGeneralDimensionsWithSamePadding) { }}); // clang-format on - auto actual_literal = LiteralUtil::CreateR4FromArray4D(*actual); + auto actual_literal = Literal::CreateR4FromArray4D(*actual); LiteralTestUtil::ExpectR4NearArray4D<float>(expected, *actual_literal, ErrorSpec(0.0001)); @@ -296,7 +296,7 @@ TEST_F(ReferenceUtilTest, ConvGeneralDimensionsWithValidPadding) { Array4D<float> expected({{{{2514, 2685}}}}); // clang-format on - auto actual_literal = LiteralUtil::CreateR4FromArray4D(*actual); + auto actual_literal = Literal::CreateR4FromArray4D(*actual); LiteralTestUtil::ExpectR4NearArray4D<float>(expected, *actual_literal, ErrorSpec(0.0001)); @@ -309,7 +309,7 @@ TEST_F(ReferenceUtilTest, ApplyElementwise2D) { auto actual = ReferenceUtil::ApplyElementwise2D( [](float x, float y, float z) { return 100 * x + 10 * y + z; }, a, b, c); - auto actual_literal = LiteralUtil::CreateR2FromArray2D(*actual); + auto actual_literal = Literal::CreateR2FromArray2D(*actual); LiteralTestUtil::ExpectR2Near({{300.f, 600.f}, {900.f, 1200.f}}, *actual_literal, ErrorSpec(0.0001)); } diff --git a/tensorflow/compiler/xla/service/algebraic_simplifier.cc b/tensorflow/compiler/xla/service/algebraic_simplifier.cc index 0fd3cd2ccc..1302026ccf 100644 --- a/tensorflow/compiler/xla/service/algebraic_simplifier.cc +++ b/tensorflow/compiler/xla/service/algebraic_simplifier.cc @@ -48,7 +48,7 @@ namespace { // Returns whether operand is a literal with the given value. 
bool IsLiteralWithValue(const HloInstruction* operand, int8 value) { return operand->opcode() == HloOpcode::kConstant && - LiteralUtil::IsAll(operand->literal(), value); + operand->literal().IsAll(value); } bool IsAll(const HloInstruction* op, int8 value) { @@ -469,7 +469,7 @@ Status AlgebraicSimplifierVisitor::HandleDot(HloInstruction* dot, ShapeUtil::HasZeroElements(lhs->shape()) || ShapeUtil::HasZeroElements(rhs->shape())) { auto zero = computation_->AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0(0.0f))); + HloInstruction::CreateConstant(Literal::CreateR0(0.0f))); return ReplaceWithNewInstruction( dot, HloInstruction::CreateBroadcast(dot->shape(), zero, {})); } @@ -507,7 +507,7 @@ Status AlgebraicSimplifierVisitor::HandleDot(HloInstruction* dot, HloComputation* add_reduce_computation = CreateScalarBinaryComputation( computation_->parent(), F32, HloOpcode::kAdd); auto zero = computation_->AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0(0.0f))); + HloInstruction::CreateConstant(Literal::CreateR0(0.0f))); auto reduce = computation_->AddInstruction(HloInstruction::CreateReduce( ShapeUtil::MakeShape(dot->shape().element_type(), {}), multiply, zero, {0}, add_reduce_computation)); @@ -531,7 +531,7 @@ Status AlgebraicSimplifierVisitor::HandleDot(HloInstruction* dot, HloComputation* add_reduce_computation = CreateScalarBinaryComputation( computation_->parent(), F32, HloOpcode::kAdd); auto zero = computation_->AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0(0.0f))); + HloInstruction::CreateConstant(Literal::CreateR0(0.0f))); HloInstruction* reduce; if (ShapeUtil::Rank(rhs->shape()) == 1) { auto multiply = computation_->AddInstruction(HloInstruction::CreateBinary( @@ -571,7 +571,7 @@ Status AlgebraicSimplifierVisitor::HandleDot(HloInstruction* dot, HloComputation* add_reduce_computation = CreateScalarBinaryComputation( computation_->parent(), F32, HloOpcode::kAdd); auto zero = computation_->AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0(0.0f))); + HloInstruction::CreateConstant(Literal::CreateR0(0.0f))); auto reduce = computation_->AddInstruction(HloInstruction::CreateReduce( ShapeUtil::MakeShape(dot->shape().element_type(), {lhs->shape().dimensions(0)}), @@ -894,8 +894,8 @@ Status AlgebraicSimplifierVisitor::HandlePower(HloInstruction* power, HloInstruction* rhs) { VLOG(10) << "trying transform [pow(A, 0) => 1]: " << power->ToString(); if (IsAll(rhs, 0)) { - auto one = HloInstruction::CreateConstant(LiteralUtil::CloneToUnique( - LiteralUtil::One(power->shape().element_type()))); + auto one = HloInstruction::CreateConstant( + Literal::One(power->shape().element_type()).CloneToUnique()); std::unique_ptr<HloInstruction> ones; if (ShapeUtil::IsScalar(power->shape())) { ones = std::move(one); @@ -920,9 +920,8 @@ Status AlgebraicSimplifierVisitor::HandlePower(HloInstruction* power, VLOG(10) << "trying transform [pow(A, -1) => 1/A]: " << power->ToString(); if (IsAll(rhs, -1)) { - auto* one = computation_->AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CloneToUnique( - LiteralUtil::One(rhs->shape().element_type())))); + auto* one = computation_->AddInstruction(HloInstruction::CreateConstant( + Literal::One(rhs->shape().element_type()).CloneToUnique())); return ReplaceWithNewInstruction( power, HloInstruction::CreateBinary(power->shape(), HloOpcode::kDivide, one, lhs)); @@ -1005,7 +1004,7 @@ Status AlgebraicSimplifierVisitor::HandleReshape(HloInstruction* reshape) { // dimension. 
if (ShapeUtil::HasZeroElements(reshape->shape())) { auto empty_constant = HloInstruction::CreateConstant( - LiteralUtil::CreateFromShape(reshape->shape())); + Literal::CreateFromShape(reshape->shape())); return ReplaceWithNewInstruction(reshape, std::move(empty_constant)); } @@ -1205,8 +1204,7 @@ Status AlgebraicSimplifierVisitor::HandleReduceWindow( // try to get more fancy about proving equivalence in cases beyond that. if (pad_value->opcode() != HloOpcode::kConstant || reduce_init_value->opcode() != HloOpcode::kConstant || - !LiteralUtil::Equal(pad_value->literal(), - reduce_init_value->literal())) { + !pad_value->literal().Equal(reduce_init_value->literal())) { VLOG(10) << "Not folding pad into reduce-window due to different pad " "values."; return Status::OK(); diff --git a/tensorflow/compiler/xla/service/algebraic_simplifier_test.cc b/tensorflow/compiler/xla/service/algebraic_simplifier_test.cc index cc1e868bc3..0792006ddb 100644 --- a/tensorflow/compiler/xla/service/algebraic_simplifier_test.cc +++ b/tensorflow/compiler/xla/service/algebraic_simplifier_test.cc @@ -55,7 +55,7 @@ TEST_F(AlgebraicSimplifierTest, AddZero) { HloInstruction* param0 = builder.AddInstruction( HloInstruction::CreateParameter(0, r0f32, "param0")); HloInstruction* zero = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f))); builder.AddInstruction( HloInstruction::CreateBinary(r0f32, HloOpcode::kAdd, param0, zero)); @@ -76,7 +76,7 @@ TEST_F(AlgebraicSimplifierTest, AddBroadcastZeroR0Operand) { HloInstruction* param0 = builder.AddInstruction( HloInstruction::CreateParameter(0, r2f32, "param0")); HloInstruction* zero = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f))); HloInstruction* bcast = builder.AddInstruction( HloInstruction::CreateBroadcast(r2f32, zero, {0, 1})); builder.AddInstruction( @@ -99,7 +99,7 @@ TEST_F(AlgebraicSimplifierTest, AddBroadcastZeroR1Operand) { HloInstruction* param0 = builder.AddInstruction( HloInstruction::CreateParameter(0, r2f32, "param0")); HloInstruction* zero = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>({0, 0, 0}))); + HloInstruction::CreateConstant(Literal::CreateR1<float>({0, 0, 0}))); HloInstruction* bcast = builder.AddInstruction(HloInstruction::CreateBroadcast(r2f32, zero, {1})); builder.AddInstruction( @@ -123,7 +123,7 @@ TEST_F(AlgebraicSimplifierTest, SubZero) { HloInstruction* param0 = builder.AddInstruction( HloInstruction::CreateParameter(0, r0f32, "param0")); HloInstruction* zero = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f))); builder.AddInstruction( HloInstruction::CreateBinary(r0f32, HloOpcode::kSubtract, param0, zero)); @@ -145,7 +145,7 @@ TEST_F(AlgebraicSimplifierTest, DivOneScalar) { HloInstruction* param0 = builder.AddInstruction( HloInstruction::CreateParameter(0, r0f32, "param0")); HloInstruction* one = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0f))); HloInstruction* div = builder.AddInstruction( HloInstruction::CreateBinary(r0f32, HloOpcode::kDivide, param0, one)); @@ -167,7 +167,7 @@ TEST_F(AlgebraicSimplifierTest, DivOneArray) { HloInstruction* param0 = 
builder.AddInstruction( HloInstruction::CreateParameter(0, r2f32, "param0")); HloInstruction* one = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR2<float>({{1.0, 1.0}, {1.0, 1.0}}))); + Literal::CreateR2<float>({{1.0, 1.0}, {1.0, 1.0}}))); HloInstruction* div = builder.AddInstruction( HloInstruction::CreateBinary(r2f32, HloOpcode::kDivide, param0, one)); @@ -300,7 +300,7 @@ TEST_F(AlgebraicSimplifierTest, Pow0Scalar) { HloInstruction* param0 = builder.AddInstruction( HloInstruction::CreateParameter(0, r0f32, "param0")); HloInstruction* zero = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(0))); builder.AddInstruction( HloInstruction::CreateBinary(r0f32, HloOpcode::kPower, param0, zero)); @@ -315,7 +315,7 @@ TEST_F(AlgebraicSimplifierTest, Pow0Scalar) { HloInstruction* root = computation->root_instruction(); EXPECT_THAT(root, op::Constant()); - EXPECT_EQ(LiteralUtil::GetFirstElement<float>(root->literal()), 1); + EXPECT_EQ(root->literal().GetFirstElement<float>(), 1); } // Test that pow(A, 0) where A is not a scalar is simplified to broadcast(1). @@ -325,7 +325,7 @@ TEST_F(AlgebraicSimplifierTest, Pow0Vector) { HloInstruction* param0 = builder.AddInstruction( HloInstruction::CreateParameter(0, r1f32, "param0")); HloInstruction* zero = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(0))); builder.AddInstruction( HloInstruction::CreateBinary(r1f32, HloOpcode::kPower, param0, zero)); @@ -344,8 +344,7 @@ TEST_F(AlgebraicSimplifierTest, Pow0Vector) { << ShapeUtil::HumanString(root->shape()); EXPECT_EQ(root->dimensions().size(), 0); EXPECT_TRUE(ShapeUtil::IsScalar(root->operand(0)->shape())); - EXPECT_EQ(LiteralUtil::GetFirstElement<float>(root->operand(0)->literal()), - 1); + EXPECT_EQ(root->operand(0)->literal().GetFirstElement<float>(), 1); } // Test that pow(A, 1) is simplified to A. 
@@ -355,7 +354,7 @@ TEST_F(AlgebraicSimplifierTest, Pow1) { HloInstruction* param0 = builder.AddInstruction( HloInstruction::CreateParameter(0, r0f32, "param0")); HloInstruction* one = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1))); builder.AddInstruction( HloInstruction::CreateBinary(r0f32, HloOpcode::kPower, param0, one)); @@ -378,7 +377,7 @@ TEST_F(AlgebraicSimplifierTest, Pow2) { HloInstruction* param0 = builder.AddInstruction( HloInstruction::CreateParameter(0, r0f32, "param0")); HloInstruction* two = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2))); builder.AddInstruction( HloInstruction::CreateBinary(r0f32, HloOpcode::kPower, param0, two)); @@ -401,7 +400,7 @@ TEST_F(AlgebraicSimplifierTest, PowNegative1) { HloInstruction* param0 = builder.AddInstruction( HloInstruction::CreateParameter(0, r0f32, "param0")); HloInstruction* negative_one = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(-1))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(-1))); builder.AddInstruction(HloInstruction::CreateBinary(r0f32, HloOpcode::kPower, param0, negative_one)); @@ -416,8 +415,7 @@ TEST_F(AlgebraicSimplifierTest, PowNegative1) { HloInstruction* root = computation->root_instruction(); EXPECT_THAT(root, op::Divide(op::Constant(), param0)); - EXPECT_EQ(LiteralUtil::GetFirstElement<float>(root->operand(0)->literal()), - 1); + EXPECT_EQ(root->operand(0)->literal().GetFirstElement<float>(), 1); } TEST_F(AlgebraicSimplifierTest, ReshapeBroadcast) { @@ -451,7 +449,7 @@ TEST_F(AlgebraicSimplifierTest, ReshapeBroadcast) { TEST_F(AlgebraicSimplifierTest, ConvertBetweenSameType) { HloComputation::Builder builder(TestName()); HloInstruction* input = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f))); builder.AddInstruction( HloInstruction::CreateConvert(ShapeUtil::MakeShape(F32, {}), input)); @@ -519,7 +517,7 @@ TEST_F(AlgebraicSimplifierTest, RemoveEmptyConcatenateOperands) { HloInstruction* param1 = builder.AddInstruction( HloInstruction::CreateParameter(1, r1f32, "param1")); HloInstruction* empty_literal = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>({}))); + HloInstruction::CreateConstant(Literal::CreateR1<float>({}))); HloInstruction* empty_slice = builder.AddInstruction(HloInstruction::CreateSlice( ShapeUtil::MakeShape(F32, {0}), param1, {42}, {42})); @@ -550,7 +548,7 @@ TEST_F(AlgebraicSimplifierTest, OnlyEmptyConcatenateOperands) { HloInstruction* param0 = builder.AddInstruction( HloInstruction::CreateParameter(0, r1f32, "param0")); HloInstruction* empty_literal = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>({}))); + HloInstruction::CreateConstant(Literal::CreateR1<float>({}))); HloInstruction* empty_slice = builder.AddInstruction(HloInstruction::CreateSlice( ShapeUtil::MakeShape(F32, {0}), param0, {42}, {42})); @@ -735,7 +733,7 @@ TEST_F(AlgebraicSimplifierTest, ReshapeAfterEffectiveUnary) { builder.AddInstruction(HloInstruction::CreateReshape( ShapeUtil::MakeShape(F32, {1, 2, 3, 4, 5}), param)); HloInstruction* zero = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f))); + 
HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f))); builder.AddInstruction( HloInstruction::CreateBinary(ShapeUtil::MakeShape(F32, {1, 2, 3, 4, 5}), HloOpcode::kMaximum, movable_reshape, zero)); @@ -1035,7 +1033,7 @@ TEST_F(AlgebraicSimplifierTest, RemoveNoopPad) { builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeShape(F32, {2, 2}), "param")); HloInstruction* zero = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f))); PaddingConfig no_padding; for (int i = 0; i < 2; ++i) { auto dimension = no_padding.add_dimensions(); @@ -1066,7 +1064,7 @@ TEST_F(AlgebraicSimplifierTest, NegativePadding) { builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeShape(F32, {10, 10}), "param")); HloInstruction* zero = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f))); PaddingConfig padding; int64 low_padding[2] = {-1, -2}; int64 high_padding[2] = {2, -3}; @@ -1376,9 +1374,9 @@ TEST_F(AlgebraicSimplifierTest, MaxMinToClamp) { HloInstruction* param0 = builder.AddInstruction( HloInstruction::CreateParameter(0, r0f32, "param0")); HloInstruction* min_value = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f))); HloInstruction* max_value = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0f))); HloInstruction* min = builder.AddInstruction(HloInstruction::CreateBinary( r0f32, HloOpcode::kMinimum, param0, min_value)); builder.AddInstruction( @@ -1406,9 +1404,9 @@ TEST_F(AlgebraicSimplifierTest, MinMaxToClamp) { HloInstruction* param0 = builder.AddInstruction( HloInstruction::CreateParameter(0, r0f32, "param0")); HloInstruction* min_value = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f))); HloInstruction* max_value = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0f))); HloInstruction* max = builder.AddInstruction(HloInstruction::CreateBinary( r0f32, HloOpcode::kMaximum, param0, max_value)); builder.AddInstruction( @@ -1437,9 +1435,9 @@ TEST_F(AlgebraicSimplifierTest, MinMaxWithBroadcastToClamp) { HloInstruction* param0 = builder.AddInstruction( HloInstruction::CreateParameter(0, r1f32, "param0")); HloInstruction* min_value = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f))); HloInstruction* max_value = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0f))); HloInstruction* max = builder.AddInstruction(HloInstruction::CreateBinary( r1f32, HloOpcode::kMaximum, param0, max_value)); builder.AddInstruction( @@ -1497,9 +1495,9 @@ TEST_F(AlgebraicSimplifierTest, MinEquationWithMaxNotToClamp) { HloInstruction* param0 = builder.AddInstruction( HloInstruction::CreateParameter(0, r0f32, "param0")); HloInstruction* min_value = builder.AddInstruction( - 
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f))); HloInstruction* max_value = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0f))); HloInstruction* max = builder.AddInstruction(HloInstruction::CreateBinary( r0f32, HloOpcode::kMaximum, param0, max_value)); HloInstruction* fmax = builder.AddInstruction( @@ -1566,7 +1564,7 @@ TEST_F(AlgebraicSimplifierTest, ScalarBroadcastToSlice) { TEST_F(AlgebraicSimplifierTest, ScalarBroadcastToTransposeReshape) { HloComputation::Builder builder(TestName()); HloInstruction* forty_two = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f))); Shape broadcast_shape = ShapeUtil::MakeShape(F32, {4, 5, 6}); HloInstruction* broadcast = @@ -1614,7 +1612,7 @@ TEST_F(AlgebraicSimplifierTest, FoldPadIntoReduceWindow) { padding.mutable_dimensions(3)->set_edge_padding_high(2); HloInstruction* pad_value = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(5.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(5.0f))); HloInstruction* pad = builder.AddInstruction(HloInstruction::CreatePad( ShapeUtil::MakeShape(F32, {1, 3, 3, 5}), operand, pad_value, padding)); @@ -1645,7 +1643,7 @@ TEST_F(AlgebraicSimplifierTest, FoldPadIntoReduceWindow) { const Shape reduce_window_shape = ShapeUtil::MakeShape(F32, {111, 113, 113, 115}); HloInstruction* reduce_init_value = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(5.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(5.0f))); HloInstruction* reduce_window = builder.AddInstruction(HloInstruction::CreateReduceWindow( reduce_window_shape, pad, reduce_init_value, window, @@ -1714,9 +1712,9 @@ TEST_F(AlgebraicSimplifierTest, IteratorInvalidation) { HloComputation::Builder call_builder(TestName() + ".Call"); HloInstruction* zero = call_builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>({0.0f}))); + HloInstruction::CreateConstant(Literal::CreateR1<float>({0.0f}))); HloInstruction* one = call_builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>({1.0f}))); + HloInstruction::CreateConstant(Literal::CreateR1<float>({1.0f}))); builder.AddInstruction( HloInstruction::CreateCall(r1f32, {zero, one}, dot_computation.get())); diff --git a/tensorflow/compiler/xla/service/buffer_assignment_test.cc b/tensorflow/compiler/xla/service/buffer_assignment_test.cc index ddfb847c5d..10021b2513 100644 --- a/tensorflow/compiler/xla/service/buffer_assignment_test.cc +++ b/tensorflow/compiler/xla/service/buffer_assignment_test.cc @@ -105,7 +105,7 @@ class BufferAssignmentTest : public HloTestBase { auto param = builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "x")); auto value = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); builder.AddInstruction( HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, param, value)); return builder.Build(); @@ -122,7 +122,7 @@ class BufferAssignmentTest : public HloTestBase { const string& name) { auto builder = HloComputation::Builder(name); auto const4 = builder.AddInstruction( - 
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(4))); + HloInstruction::CreateConstant(Literal::CreateR0<int>(4))); auto param = builder.AddInstruction( HloInstruction::CreateParameter(0, t_s32_f32v4_, "x")); auto index = builder.AddInstruction( @@ -147,9 +147,9 @@ class BufferAssignmentTest : public HloTestBase { const string& name) { auto builder = HloComputation::Builder(name); auto const1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(1))); + HloInstruction::CreateConstant(Literal::CreateR0<int>(1))); auto constv = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR1<float>({1.1f, 2.2f, 3.3f, 4.4f}))); + Literal::CreateR1<float>({1.1f, 2.2f, 3.3f, 4.4f}))); auto param = builder.AddInstruction( HloInstruction::CreateParameter(0, t_s32_f32v4_, "x")); auto indexc = builder.AddInstruction( @@ -264,7 +264,7 @@ static bool BuffersDistinct(const std::vector<const HloInstruction*>& a, TEST_F(BufferAssignmentTest, ScalarConstant) { auto builder = HloComputation::Builder(TestName()); auto const0 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto module = CreateNewModule(); module->AddEntryComputation(builder.Build()); @@ -278,9 +278,9 @@ TEST_F(BufferAssignmentTest, BufferForConst) { // no buffers assigned, and their consumer has a buffer. auto builder = HloComputation::Builder(TestName()); auto const0 = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR1<float>({1.1f, 2.2f, 3.3f, 4.4f}))); + Literal::CreateR1<float>({1.1f, 2.2f, 3.3f, 4.4f}))); auto const1 = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR1<float>({4.1f, 4.2f, 4.3f, 4.4f}))); + Literal::CreateR1<float>({4.1f, 4.2f, 4.3f, 4.4f}))); auto add = builder.AddInstruction( HloInstruction::CreateBinary(f32vec4_, HloOpcode::kAdd, const0, const1)); auto module = CreateNewModule(); @@ -298,7 +298,7 @@ TEST_F(BufferAssignmentTest, BufferForOutputConst) { // This computation copies a constant to output. auto builder = HloComputation::Builder(TestName()); auto const0 = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR1<float>({1.1f, 2.2f, 3.3f, 4.4f}))); + Literal::CreateR1<float>({1.1f, 2.2f, 3.3f, 4.4f}))); auto copy = builder.AddInstruction( HloInstruction::CreateUnary(const0->shape(), HloOpcode::kCopy, const0)); auto module = CreateNewModule(); @@ -586,7 +586,7 @@ TEST_F(BufferAssignmentTest, CannotReuseInputBufferOfReduce) { auto exp2 = builder.AddInstruction( HloInstruction::CreateUnary(f32a100x10_, HloOpcode::kExp, exp1)); auto const0 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f))); auto reduce = builder.AddInstruction(HloInstruction::CreateReduce( /*shape=*/f32vec10_, /*operand=*/exp2, @@ -634,9 +634,9 @@ TEST_F(BufferAssignmentTest, ExampleWhile) { // Creates the main kernel and verifies instruction counts. 
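The recurring pattern in the hunks above, shown as a minimal standalone sketch rather than a line from the patch: every static LiteralUtil factory becomes the equivalent static factory on Literal itself. The builder variable and the constant values below are illustrative only.

// Before: scalar and vector constants built via the LiteralUtil helper class.
// auto c = builder.AddInstruction(
//     HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));

// After: the same factories are static members of Literal.
auto c = builder.AddInstruction(
    HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f)));
auto v = builder.AddInstruction(HloInstruction::CreateConstant(
    Literal::CreateR1<float>({1.0f, 2.0f, 3.0f})));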
auto builder = HloComputation::Builder(TestName()); auto const3 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(0))); + HloInstruction::CreateConstant(Literal::CreateR0<int>(0))); auto const4 = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR1<float>({1.1f, 2.2f, 3.3f, 4.4f}))); + Literal::CreateR1<float>({1.1f, 2.2f, 3.3f, 4.4f}))); auto tuple = builder.AddInstruction(HloInstruction::CreateTuple({const3, const4})); auto while_op = builder.AddInstruction(HloInstruction::CreateWhile( @@ -1075,9 +1075,8 @@ TEST_F(BufferAssignmentTest, DISABLED_TupleConstantAsOutput) { // Test that a tuple constant which is forwarded to the computation output is // properly handled. auto builder = HloComputation::Builder(TestName()); - builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::MakeTuple({LiteralUtil::CreateR0<int64>(0).get(), - LiteralUtil::CreateR0<int64>(1).get()}))); + builder.AddInstruction(HloInstruction::CreateConstant(Literal::MakeTuple( + {Literal::CreateR0<int64>(0).get(), Literal::CreateR0<int64>(1).get()}))); auto module = CreateNewModule(); module->AddEntryComputation(builder.Build()); @@ -1369,9 +1368,9 @@ class WhileBufferAssignmentTest : public HloTestBase { builder.AddInstruction( HloInstruction::CreateParameter(0, loop_state_shape_, "loop_state")); auto zero = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(0))); + HloInstruction::CreateConstant(Literal::CreateR0<int>(0))); auto ten = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(10))); + HloInstruction::CreateConstant(Literal::CreateR0<int>(10))); builder.AddInstruction(HloInstruction::CreateBinary( ShapeUtil::MakeShape(PRED, {}), HloOpcode::kLt, zero, ten)); return builder.Build(); @@ -1429,7 +1428,7 @@ TEST_F(WhileBufferAssignmentTest, TwoForwardWhileLoops) { HloInstruction::CreateParameter(2, data_shape_, "weights1")); auto zero = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0))); auto output0 = builder.AddInstruction( HloInstruction::CreateBroadcast(data_shape_, zero, {1})); auto output1 = builder.AddInstruction( @@ -1484,7 +1483,7 @@ TEST_F(WhileBufferAssignmentTest, OneForwardBackwardWhileLoopSet) { HloInstruction::CreateParameter(1, data_shape_, "weights0")); auto zero = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0))); auto output0 = builder.AddInstruction( HloInstruction::CreateBroadcast(data_shape_, zero, {1})); auto output1 = builder.AddInstruction( @@ -1532,16 +1531,16 @@ TEST_F(BufferAssignmentTest, TwoCalls) { auto param = builder.AddInstruction( HloInstruction::CreateParameter(0, r0f32, "param")); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto add = builder.AddInstruction( HloInstruction::CreateBinary(r0f32, HloOpcode::kAdd, param, constant1)); sub_computation = module->AddEmbeddedComputation(builder.Build(add)); } auto builder = HloComputation::Builder(TestName()); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); auto constant3 = builder.AddInstruction( - 
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(3.0))); auto call1 = builder.AddInstruction( HloInstruction::CreateCall(r0f32, {constant2}, sub_computation)); auto call2 = builder.AddInstruction( @@ -1593,9 +1592,9 @@ TEST_F(WhileBufferAssignmentTest, WhileLoopsInterferingResultRange) { auto builder = HloComputation::Builder(TestName()); auto zero = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0))); auto one = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto input0 = builder.AddInstruction( HloInstruction::CreateParameter(0, data_shape_, "input0")); @@ -1675,7 +1674,7 @@ TEST_F(WhileBufferAssignmentTest, DISABLED_TwoWhiles) { HloInstruction::CreateParameter(1, data_shape_, "weights0")); auto zero = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0))); auto output0 = builder.AddInstruction( HloInstruction::CreateBroadcast(data_shape_, zero, {1})); diff --git a/tensorflow/compiler/xla/service/buffer_liveness_test.cc b/tensorflow/compiler/xla/service/buffer_liveness_test.cc index c5c24e2d48..a31e9b1782 100644 --- a/tensorflow/compiler/xla/service/buffer_liveness_test.cc +++ b/tensorflow/compiler/xla/service/buffer_liveness_test.cc @@ -397,13 +397,11 @@ TEST_F(BufferLivenessTest, TupleConstantLiveOut) { // computation. The buffer containing {0, 1} is copied by GetTupleElement, and // the buffers containing {3} and 3 are dead. auto builder = HloComputation::Builder(TestName()); - auto inner_tuple0 = - LiteralUtil::MakeTuple({LiteralUtil::CreateR0<int64>(0).get(), - LiteralUtil::CreateR0<int64>(1).get()}); - auto inner_tuple1 = - LiteralUtil::MakeTuple({LiteralUtil::CreateR0<int64>(3).get()}); + auto inner_tuple0 = Literal::MakeTuple( + {Literal::CreateR0<int64>(0).get(), Literal::CreateR0<int64>(1).get()}); + auto inner_tuple1 = Literal::MakeTuple({Literal::CreateR0<int64>(3).get()}); auto tuple_constant = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::MakeTuple({inner_tuple0.get(), inner_tuple1.get()}))); + Literal::MakeTuple({inner_tuple0.get(), inner_tuple1.get()}))); builder.AddInstruction(HloInstruction::CreateGetTupleElement( inner_tuple0->shape(), tuple_constant, 0)); @@ -450,7 +448,7 @@ TEST_F(BufferLivenessTest, IndependentTupleElements) { builder.AddInstruction(HloInstruction::CreateGetTupleElement( tuple_element0_shape, tuple_param0, 0)); auto const0 = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR1<float>({1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f}))); + Literal::CreateR1<float>({1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f}))); auto add0 = builder.AddInstruction(HloInstruction::CreateBinary( tuple_element0_shape, HloOpcode::kAdd, tuple_element0, const0)); @@ -462,7 +460,7 @@ TEST_F(BufferLivenessTest, IndependentTupleElements) { builder.AddInstruction(HloInstruction::CreateGetTupleElement( tuple_element1_shape, tuple_param0, 1)); auto const1 = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR1<float>({2.f, 2.f, 2.f, 2.f, 2.f, 2.f, 2.f, 2.f}))); + Literal::CreateR1<float>({2.f, 2.f, 2.f, 2.f, 2.f, 2.f, 2.f, 2.f}))); auto add1 = builder.AddInstruction(HloInstruction::CreateBinary( tuple_element1_shape, 
HloOpcode::kAdd, tuple_element1, const1)); @@ -513,7 +511,7 @@ TEST_F(BufferLivenessTest, DependentTupleElements) { builder.AddInstruction(HloInstruction::CreateGetTupleElement( tuple_element0_shape, tuple_param0, 0)); auto const0 = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR1<float>({1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f}))); + Literal::CreateR1<float>({1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f}))); auto add0 = builder.AddInstruction(HloInstruction::CreateBinary( tuple_element0_shape, HloOpcode::kAdd, tuple_element0, const0)); @@ -585,7 +583,7 @@ class FusedDynamicUpdateSliceLivenessTest : public BufferLivenessTest { HloInstruction::CreateGetTupleElement(data_shape, tuple_param0, 1)); auto update = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR1<float>({2.f, 2.f, 2.f}))); + Literal::CreateR1<float>({2.f, 2.f, 2.f}))); HloInstruction* slice = nullptr; if (update_uses_tuple_element1) { // Create a slice instruction as an additional user of 'gte1'. @@ -596,7 +594,7 @@ class FusedDynamicUpdateSliceLivenessTest : public BufferLivenessTest { } // Create a DynamicUpdateSlice instruction of tuple element 1 with 'update'. auto starts = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR1<int32>({2}))); + HloInstruction::CreateConstant(Literal::CreateR1<int32>({2}))); auto dynamic_update_slice = builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice( data_shape, gte1, update, starts)); @@ -715,7 +713,7 @@ class DynamicUpdateSliceLivenessTest : public BufferLivenessTest { HloInstruction::CreateGetTupleElement(data_shape, tuple_param0, 1)); auto update = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR1<float>({2.f, 2.f, 2.f}))); + Literal::CreateR1<float>({2.f, 2.f, 2.f}))); if (tuple_element1_has_two_uses) { // Add 'gte0' and 'gte1' to create another user of 'gte1'. @@ -724,7 +722,7 @@ class DynamicUpdateSliceLivenessTest : public BufferLivenessTest { } // Create a DynamicUpdateSlice instruction of tuple element 1 with 'update'. 
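The tuple-constant updates above (buffer assignment and buffer liveness tests) apply the same move to MakeTuple. MakeTuple takes raw pointers to the element literals and copies them into the new tuple, which is why the tests can pass .get() from locally owned unique_ptrs. A sketch of the nested {(0, 1), (3)} tuple constant from those tests, with illustrative variable names:

auto inner_tuple0 = Literal::MakeTuple(
    {Literal::CreateR0<int64>(0).get(), Literal::CreateR0<int64>(1).get()});
auto inner_tuple1 = Literal::MakeTuple({Literal::CreateR0<int64>(3).get()});
auto tuple_constant = builder.AddInstruction(HloInstruction::CreateConstant(
    Literal::MakeTuple({inner_tuple0.get(), inner_tuple1.get()})));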
auto starts = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR1<int32>({2}))); + HloInstruction::CreateConstant(Literal::CreateR1<int32>({2}))); auto dynamic_update_slice = builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice( data_shape, gte1, update, starts)); diff --git a/tensorflow/compiler/xla/service/call_graph_test.cc b/tensorflow/compiler/xla/service/call_graph_test.cc index e276473c90..bbf67c9803 100644 --- a/tensorflow/compiler/xla/service/call_graph_test.cc +++ b/tensorflow/compiler/xla/service/call_graph_test.cc @@ -81,7 +81,7 @@ class CallGraphTest : public HloTestBase { HloInstruction* param0 = builder.AddInstruction( HloInstruction::CreateParameter(0, kScalarShape, "param0")); HloInstruction* zero = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f))); builder.AddInstruction(HloInstruction::CreateBinary( ShapeUtil::MakeShape(PRED, {}), HloOpcode::kGt, param0, zero)); return builder.Build(); diff --git a/tensorflow/compiler/xla/service/copy_insertion_test.cc b/tensorflow/compiler/xla/service/copy_insertion_test.cc index cc77339bb6..026be75757 100644 --- a/tensorflow/compiler/xla/service/copy_insertion_test.cc +++ b/tensorflow/compiler/xla/service/copy_insertion_test.cc @@ -87,7 +87,7 @@ TEST_F(CopyInsertionTest, SingleParameter) { TEST_F(CopyInsertionTest, SingleConstant) { auto builder = HloComputation::Builder(TestName()); HloInstruction* constant = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); HloInstruction* tuple = builder.AddInstruction(HloInstruction::CreateTuple({constant})); @@ -110,9 +110,9 @@ TEST_F(CopyInsertionTest, MultipleConstantsAndParameters) { auto builder = HloComputation::Builder(TestName()); HloInstruction* constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); HloInstruction* constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); HloInstruction* x = builder.AddInstruction( HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "x")); @@ -140,11 +140,11 @@ TEST_F(CopyInsertionTest, AmbiguousPointsToSet) { // the computation result. Verify that copies are added properly. 
auto builder = HloComputation::Builder(TestName()); HloInstruction* constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); HloInstruction* constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); HloInstruction* constant3 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(3.0))); HloInstruction* tuple1 = builder.AddInstruction( HloInstruction::CreateTuple({constant1, constant2})); @@ -152,7 +152,7 @@ TEST_F(CopyInsertionTest, AmbiguousPointsToSet) { HloInstruction::CreateTuple({constant3, constant2})); HloInstruction* pred = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))); + HloInstruction::CreateConstant(Literal::CreateR0<bool>(false))); builder.AddInstruction(HloInstruction::CreateTernary( tuple1->shape(), HloOpcode::kSelect, pred, tuple1, tuple2)); @@ -196,9 +196,8 @@ TEST_F(CopyInsertionTest, BitcastConstant) { // The output of a bitcast is its operand (same buffer), so a bitcast // constant feeding the result must have a copy added. auto builder = HloComputation::Builder(TestName()); - HloInstruction* constant = - builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR1<float>({1.0, 42.0}))); + HloInstruction* constant = builder.AddInstruction( + HloInstruction::CreateConstant(Literal::CreateR1<float>({1.0, 42.0}))); HloInstruction* bitcast = builder.AddInstruction(HloInstruction::CreateUnary( ShapeUtil::MakeShape(F32, {2, 2}), HloOpcode::kBitcast, constant)); @@ -308,9 +307,9 @@ TEST_F(CopyInsertionTest, AmbiguousTopLevelRoot) { // copy is added. auto builder = HloComputation::Builder(TestName()); HloInstruction* constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); HloInstruction* constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); HloInstruction* tuple1 = builder.AddInstruction( HloInstruction::CreateTuple({constant1, constant2})); @@ -318,7 +317,7 @@ TEST_F(CopyInsertionTest, AmbiguousTopLevelRoot) { HloInstruction::CreateTuple({constant2, constant1})); HloInstruction* pred = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))); + HloInstruction::CreateConstant(Literal::CreateR0<bool>(false))); HloInstruction* select = builder.AddInstruction(HloInstruction::CreateTernary( tuple1->shape(), HloOpcode::kSelect, pred, tuple1, tuple2)); HloInstruction* gte = @@ -350,7 +349,7 @@ class WhileCopyInsertionTest : public CopyInsertionTest { bool nested = false) { auto builder = HloComputation::Builder(TestName() + ".Condition"); auto limit_const = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(10))); + HloInstruction::CreateConstant(Literal::CreateR0<int32>(10))); const Shape& loop_state_shape = nested ? 
nested_loop_state_shape_ : loop_state_shape_; auto loop_state = builder.AddInstruction( @@ -381,7 +380,7 @@ class WhileCopyInsertionTest : public CopyInsertionTest { builder.AddInstruction(HloInstruction::CreateGetTupleElement( induction_variable_shape_, loop_state, 0)); auto inc = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(1))); + HloInstruction::CreateConstant(Literal::CreateR0<int32>(1))); auto add0 = builder.AddInstruction(HloInstruction::CreateBinary( induction_variable->shape(), HloOpcode::kAdd, induction_variable, inc)); // Update data GTE(1). @@ -419,7 +418,7 @@ class WhileCopyInsertionTest : public CopyInsertionTest { builder.AddInstruction(HloInstruction::CreateGetTupleElement( induction_variable_shape_, loop_state, 0)); auto inc = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(1))); + HloInstruction::CreateConstant(Literal::CreateR0<int32>(1))); // add0 = Add(in0, 1) auto add0 = builder.AddInstruction(HloInstruction::CreateBinary( @@ -488,7 +487,7 @@ class WhileCopyInsertionTest : public CopyInsertionTest { builder.AddInstruction(HloInstruction::CreateGetTupleElement( induction_variable_shape_, loop_state, 0)); auto inc = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(1))); + HloInstruction::CreateConstant(Literal::CreateR0<int32>(1))); // add0 = Add(in0, 1) auto add0 = builder.AddInstruction(HloInstruction::CreateBinary( induction_variable->shape(), HloOpcode::kAdd, induction_variable, inc)); @@ -503,9 +502,8 @@ class WhileCopyInsertionTest : public CopyInsertionTest { data = builder.AddInstruction( HloInstruction::CreateGetTupleElement(data_shape_, loop_state, 1)); } - auto update = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>( - {1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f}))); + auto update = builder.AddInstruction(HloInstruction::CreateConstant( + Literal::CreateR1<float>({1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f}))); // add1 = Add(in1, {1, 1, 1, 1, 1, 1, 1, 1}) auto add1 = builder.AddInstruction(HloInstruction::CreateBinary( data_shape_, HloOpcode::kAdd, data, update)); @@ -538,7 +536,7 @@ class WhileCopyInsertionTest : public CopyInsertionTest { auto gte0 = builder.AddInstruction(HloInstruction::CreateGetTupleElement( induction_variable_shape_, loop_state, 0)); auto inc = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(1))); + HloInstruction::CreateConstant(Literal::CreateR0<int32>(1))); auto add0 = builder.AddInstruction(HloInstruction::CreateBinary( gte0->shape(), HloOpcode::kAdd, gte0, inc)); @@ -548,9 +546,8 @@ class WhileCopyInsertionTest : public CopyInsertionTest { // GTE(GTE(loop_state, 1), 0) -> Add auto gte10 = builder.AddInstruction( HloInstruction::CreateGetTupleElement(data_shape_, gte1, 0)); - auto update10 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>( - {1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f}))); + auto update10 = builder.AddInstruction(HloInstruction::CreateConstant( + Literal::CreateR1<float>({1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f}))); auto add10 = builder.AddInstruction(HloInstruction::CreateBinary( data_shape_, HloOpcode::kAdd, gte10, update10)); @@ -574,11 +571,10 @@ class WhileCopyInsertionTest : public CopyInsertionTest { bool nested = false) { auto builder = HloComputation::Builder(TestName() + ".While"); auto induction_var_init = builder.AddInstruction( - 
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(0))); + HloInstruction::CreateConstant(Literal::CreateR0<int32>(0))); - auto data_init = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>( - {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}))); + auto data_init = builder.AddInstruction(HloInstruction::CreateConstant( + Literal::CreateR1<float>({0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}))); if (nested) { auto inner_init = builder.AddInstruction( @@ -601,9 +597,8 @@ class WhileCopyInsertionTest : public CopyInsertionTest { HloInstruction* BuildWhileInstruction_InitPointsToConstant() { auto builder = HloComputation::Builder(TestName() + ".While"); - auto data_init = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>( - {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}))); + auto data_init = builder.AddInstruction(HloInstruction::CreateConstant( + Literal::CreateR1<float>({0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}))); return BuildWhileInstructionWithCustomInit(loop_state_shape_, data_init, &builder); } @@ -620,11 +615,11 @@ class WhileCopyInsertionTest : public CopyInsertionTest { auto builder = HloComputation::Builder(TestName() + ".While"); auto one = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto v1 = builder.AddInstruction( HloInstruction::CreateBroadcast(data_shape_, one, {1})); auto zero = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto v2 = builder.AddInstruction( HloInstruction::CreateBroadcast(data_shape_, zero, {1})); @@ -632,7 +627,7 @@ class WhileCopyInsertionTest : public CopyInsertionTest { auto tuple2 = builder.AddInstruction(HloInstruction::CreateTuple({v2, v1})); auto pred = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))); + HloInstruction::CreateConstant(Literal::CreateR0<bool>(false))); auto data_init = builder.AddInstruction(HloInstruction::CreateTernary( nested_tuple_shape_, HloOpcode::kSelect, pred, tuple1, tuple2)); @@ -644,7 +639,7 @@ class WhileCopyInsertionTest : public CopyInsertionTest { auto builder = HloComputation::Builder(TestName() + ".While"); auto one = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto one_vec = builder.AddInstruction( HloInstruction::CreateBroadcast(data_shape_, one, {1})); auto data_init = @@ -657,12 +652,11 @@ class WhileCopyInsertionTest : public CopyInsertionTest { HloInstruction* BuildWhileInstruction_InitPointsToInterfering() { auto builder = HloComputation::Builder(TestName() + ".While"); auto one = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto data_init = builder.AddInstruction( HloInstruction::CreateBroadcast(data_shape_, one, {1})); - auto one_vec = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>( - {1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f}))); + auto one_vec = builder.AddInstruction(HloInstruction::CreateConstant( + Literal::CreateR1<float>({1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f}))); // Take a reference to 'data_init' to make it interfere with while result. 
builder.AddInstruction(HloInstruction::CreateBinary( data_shape_, HloOpcode::kAdd, data_init, one_vec)); @@ -677,7 +671,7 @@ class WhileCopyInsertionTest : public CopyInsertionTest { const bool nested = ShapeUtil::Equal(loop_state_shape, nested_loop_state_shape_); auto induction_var_init = builder->AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(0))); + HloInstruction::CreateConstant(Literal::CreateR0<int32>(0))); auto condition = module_->AddEmbeddedComputation(BuildConditionComputation(nested)); auto body = module_->AddEmbeddedComputation( diff --git a/tensorflow/compiler/xla/service/cpu/conv_canonicalization_test.cc b/tensorflow/compiler/xla/service/cpu/conv_canonicalization_test.cc index f5ad431277..ec992f15e6 100644 --- a/tensorflow/compiler/xla/service/cpu/conv_canonicalization_test.cc +++ b/tensorflow/compiler/xla/service/cpu/conv_canonicalization_test.cc @@ -59,11 +59,11 @@ TEST_F(ConvCanonicalizationTest, NonCanonicalToCanonical) { auto builder = HloComputation::Builder(TestName()); // The input dimensions are in CNHW order. auto input = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR4FromArray4D(Array4D<float>( + Literal::CreateR4FromArray4D(Array4D<float>( kInputFeatureCount, kBatchSize, kInputSize, kInputSize)))); // The kernel dimensions are in OIHW order. auto kernel = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR4FromArray4D(Array4D<float>( + Literal::CreateR4FromArray4D(Array4D<float>( kOutputFeatureCount, kInputFeatureCount, kWindowSize, kWindowSize)))); ConvolutionDimensionNumbers dnums; @@ -113,11 +113,11 @@ TEST_F(ConvCanonicalizationTest, CanonicalStaysTheSame) { auto builder = HloComputation::Builder(TestName()); // The input dimensions are in NHWC order. auto input = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR4FromArray4D(Array4D<float>( + Literal::CreateR4FromArray4D(Array4D<float>( kBatchSize, kInputSize, kInputSize, kInputFeatureCount)))); // The kernel dimensions are in HWIO order. auto kernel = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR4FromArray4D(Array4D<float>( + Literal::CreateR4FromArray4D(Array4D<float>( kWindowSize, kWindowSize, kInputFeatureCount, kOutputFeatureCount)))); ConvolutionDimensionNumbers dnums; diff --git a/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc b/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc index 9c3463d103..4a73bda05e 100644 --- a/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc +++ b/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc @@ -406,7 +406,7 @@ StatusOr<std::unique_ptr<Executable>> CpuCompiler::Compile( if (instruction->opcode() == HloOpcode::kConstant) { // Copy the constant out of the ProtocolBuffer so that we can give it a // higher alignment. - const void* data = LiteralUtil::InternalData(instruction->literal()); + const void* data = instruction->literal().InternalData(); int64 size = CpuExecutable::ShapeSizeBytes(instruction->shape()); auto iter = aligned_constants.emplace( instruction, MakeUnique<unsigned char[]>(size)); diff --git a/tensorflow/compiler/xla/service/cpu/sample_harness.cc b/tensorflow/compiler/xla/service/cpu/sample_harness.cc index 8f1ce82d49..b3f4609d46 100644 --- a/tensorflow/compiler/xla/service/cpu/sample_harness.cc +++ b/tensorflow/compiler/xla/service/cpu/sample_harness.cc @@ -38,13 +38,12 @@ int main(int argc, char** argv) { // Transfer parameters. 
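The convolution canonicalization hunks above show the rank-4 variant of the same factory move: CreateR4FromArray4D also becomes a static on Literal. A short sketch reusing that test's own dimension constants (kInputFeatureCount, kBatchSize, kInputSize are defined in the test; the CNHW ordering follows its comment):

auto input = builder.AddInstruction(HloInstruction::CreateConstant(
    Literal::CreateR4FromArray4D(Array4D<float>(
        kInputFeatureCount, kBatchSize, kInputSize, kInputSize))));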
std::unique_ptr<xla::Literal> param0_literal = - xla::LiteralUtil::CreateR1<float>({1.1f, 2.2f, 3.3f, 5.5f}); + xla::Literal::CreateR1<float>({1.1f, 2.2f, 3.3f, 5.5f}); std::unique_ptr<xla::GlobalData> param0_data = client->TransferToServer(*param0_literal).ConsumeValueOrDie(); - std::unique_ptr<xla::Literal> param1_literal = - xla::LiteralUtil::CreateR2<float>( - {{3.1f, 4.2f, 7.3f, 9.5f}, {1.1f, 2.2f, 3.3f, 4.4f}}); + std::unique_ptr<xla::Literal> param1_literal = xla::Literal::CreateR2<float>( + {{3.1f, 4.2f, 7.3f, 9.5f}, {1.1f, 2.2f, 3.3f, 4.4f}}); std::unique_ptr<xla::GlobalData> param1_data = client->TransferToServer(*param1_literal).ConsumeValueOrDie(); @@ -69,7 +68,7 @@ int main(int argc, char** argv) { LOG(INFO) << tensorflow::strings::Printf("computation took %lldns", profile.compute_time_ns()); - LOG(INFO) << xla::LiteralUtil::ToString(*actual); + LOG(INFO) << actual->ToString(); return 0; } diff --git a/tensorflow/compiler/xla/service/cpu_transfer_manager.cc b/tensorflow/compiler/xla/service/cpu_transfer_manager.cc index 5e431687c4..1d553cab1a 100644 --- a/tensorflow/compiler/xla/service/cpu_transfer_manager.cc +++ b/tensorflow/compiler/xla/service/cpu_transfer_manager.cc @@ -81,8 +81,7 @@ Status CpuTransferManager::TransferLiteralToInfeed(se::StreamExecutor* executor, ShapeUtil::HumanString(literal.shape()).c_str(), size); } - return TransferBufferToInfeed(executor, size, - LiteralUtil::InternalData(literal)); + return TransferBufferToInfeed(executor, size, literal.InternalData()); } Status CpuTransferManager::TransferBufferToInfeed(se::StreamExecutor* executor, diff --git a/tensorflow/compiler/xla/service/flatten_call_graph_test.cc b/tensorflow/compiler/xla/service/flatten_call_graph_test.cc index bb4712c86f..a08506d84d 100644 --- a/tensorflow/compiler/xla/service/flatten_call_graph_test.cc +++ b/tensorflow/compiler/xla/service/flatten_call_graph_test.cc @@ -80,7 +80,7 @@ class FlattenCallGraphTest : public HloTestBase { HloInstruction* param0 = builder.AddInstruction( HloInstruction::CreateParameter(0, kScalarShape, "param0")); HloInstruction* zero = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f))); builder.AddInstruction(HloInstruction::CreateBinary( ShapeUtil::MakeShape(PRED, {}), HloOpcode::kGt, param0, zero)); return builder.Build(); @@ -157,7 +157,7 @@ TEST_F(FlattenCallGraphTest, SharedWhileConditionAndBody) { builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeShape(PRED, {}), "param0")); HloInstruction* false_constant = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))); + HloInstruction::CreateConstant(Literal::CreateR0<bool>(false))); builder.AddInstruction( HloInstruction::CreateBinary(ShapeUtil::MakeShape(PRED, {}), HloOpcode::kEq, param0, false_constant)); @@ -168,7 +168,7 @@ TEST_F(FlattenCallGraphTest, SharedWhileConditionAndBody) { { HloComputation::Builder builder(TestName() + ".entry"); HloInstruction* false_constant = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))); + HloInstruction::CreateConstant(Literal::CreateR0<bool>(false))); builder.AddInstruction(HloInstruction::CreateWhile( ShapeUtil::MakeShape(PRED, {}), cond_computation, cond_computation, false_constant)); diff --git a/tensorflow/compiler/xla/service/generic_transfer_manager.cc b/tensorflow/compiler/xla/service/generic_transfer_manager.cc index b1c7eadf6a..476b2b8d6f 100644 
--- a/tensorflow/compiler/xla/service/generic_transfer_manager.cc +++ b/tensorflow/compiler/xla/service/generic_transfer_manager.cc @@ -82,13 +82,12 @@ Status GenericTransferManager::TransferLiteralFromDevice( } *literal->mutable_shape() = device_shape; - LiteralUtil::Reserve(ShapeUtil::ElementsIn(device_shape), literal); + literal->Reserve(ShapeUtil::ElementsIn(device_shape)); TF_RETURN_IF_ERROR(TransferBufferFromDevice( executor, source, /*size=*/ShapeUtil::ByteSizeOf(device_shape), - /*destination=*/LiteralUtil::MutableInternalData(literal))); + /*destination=*/literal->MutableInternalData())); if (!ShapeUtil::Equal(literal_shape, device_shape)) { - literal->Swap( - LiteralUtil::Relayout(*literal, literal_shape.layout()).get()); + literal->Swap(literal->Relayout(literal_shape.layout()).get()); } TF_RET_CHECK(ShapeUtil::Equal(literal_shape, literal->shape())); return Status::OK(); @@ -152,9 +151,9 @@ Status GenericTransferManager::TransferLiteralToDevice( tuple_elements_on_device.data(), destination); } - return TransferBufferToDevice( - executor, /*size=*/GetByteSizeRequirement(shape), - /*source=*/LiteralUtil::InternalData(literal), destination); + return TransferBufferToDevice(executor, + /*size=*/GetByteSizeRequirement(shape), + /*source=*/literal.InternalData(), destination); } Status GenericTransferManager::TransferLiteralToInfeed( diff --git a/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.cc b/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.cc index 2987c8913d..c2dec7ed6a 100644 --- a/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.cc +++ b/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.cc @@ -55,7 +55,7 @@ using tensorflow::strings::StrAppend; // Returns whether operand is a floating-point literal with the given value. bool IsFPLiteralWithValue(const HloInstruction* operand, float value) { return operand->opcode() == HloOpcode::kConstant && - LiteralUtil::IsAllFloat(operand->literal(), value); + operand->literal().IsAllFloat(value); } GpuElementalIrEmitter::GpuElementalIrEmitter( diff --git a/tensorflow/compiler/xla/service/gpu/fusion_merger_test.cc b/tensorflow/compiler/xla/service/gpu/fusion_merger_test.cc index 8afc32dea9..242c32936d 100644 --- a/tensorflow/compiler/xla/service/gpu/fusion_merger_test.cc +++ b/tensorflow/compiler/xla/service/gpu/fusion_merger_test.cc @@ -59,7 +59,7 @@ class FusionMergerTest : public HloTestBase { // Create const vector of ones to be used in element-wise computations. auto one_vec = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR1<float>({1.f, 1.f, 1.f, 1.f}))); + Literal::CreateR1<float>({1.f, 1.f, 1.f, 1.f}))); // Create simple fusable computation for tuple element 0 (wont get merged). auto out0 = builder.AddInstruction(HloInstruction::CreateBinary( @@ -138,7 +138,7 @@ class FusionMergerTest : public HloTestBase { // Create two sub-computations, both of which are users of 'mul0'. auto one_vec = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR1<float>({1.f, 1.f, 1.f, 1.f}))); + Literal::CreateR1<float>({1.f, 1.f, 1.f, 1.f}))); // First sub-computation: out0 = Mul(Add(mul0, one_vec), one_vec) auto add0 = builder.AddInstruction(HloInstruction::CreateBinary( @@ -209,7 +209,7 @@ class FusionMergerTest : public HloTestBase { // Create two fusable sub-computations which are dependent on shared // computation 'reduce_out'. 
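The transfer-manager hunks above replace the remaining free-function buffer accessors with members on Literal. A rough sketch of both halves, following those functions (the actual byte copy is elided; "literal" is a Literal* on the write side and a const Literal& on the read side, as in the code above):

// Write side, as in TransferLiteralFromDevice:
literal->Reserve(ShapeUtil::ElementsIn(device_shape));   // size the element store
void* dst = literal->MutableInternalData();              // mutable raw element data
// ...ShapeUtil::ByteSizeOf(device_shape) bytes are copied from the device into dst...
if (!ShapeUtil::Equal(literal_shape, device_shape)) {
  // Relayout returns a fresh literal; Swap adopts its contents in place.
  literal->Swap(literal->Relayout(literal_shape.layout()).get());
}

// Read side, as in TransferLiteralToDevice, needs only const access:
const void* src = literal.InternalData();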
auto one_vec = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR1<float>({1.f, 1.f, 1.f, 1.f}))); + Literal::CreateR1<float>({1.f, 1.f, 1.f, 1.f}))); // First sub-computation: out0 = Mul(Add(reduce_out, one_vec), one_vec) auto add2 = builder.AddInstruction(HloInstruction::CreateBinary( diff --git a/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc b/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc index e44b645342..ab04d1736e 100644 --- a/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc +++ b/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc @@ -1648,7 +1648,7 @@ std::unique_ptr<Thunk> IrEmitterUnnested::BuildCopyThunk( const HloInstruction* operand = inst->operand(0); CHECK_EQ(HloOpcode::kConstant, operand->opcode()); return MakeUnique<CopyThunk>( - /*source_address=*/LiteralUtil::InternalData(operand->literal()), + /*source_address=*/operand->literal().InternalData(), /*destination_buffer=*/GetAllocationSlice(*inst), /*mem_size=*/ llvm_ir::ByteSizeOf(operand->shape(), diff --git a/tensorflow/compiler/xla/service/gpu/pad_insertion.cc b/tensorflow/compiler/xla/service/gpu/pad_insertion.cc index c645e84aa4..4e130de311 100644 --- a/tensorflow/compiler/xla/service/gpu/pad_insertion.cc +++ b/tensorflow/compiler/xla/service/gpu/pad_insertion.cc @@ -61,7 +61,7 @@ HloInstruction* MaybePaddedAndSlicedInput( PrimitiveType element_type = input->shape().element_type(); HloInstruction* padding = computation->AddInstruction(HloInstruction::CreateConstant( - MakeUnique<Literal>(LiteralUtil::Zero(element_type)))); + MakeUnique<Literal>(Literal::Zero(element_type)))); input = computation->AddInstruction(HloInstruction::CreatePad( ShapeInference::InferPadShape( /*operand_shape=*/input->shape(), @@ -126,7 +126,7 @@ HloInstruction* MaybePaddedKernel(const Window& conv_window, PrimitiveType element_type = kernel->shape().element_type(); HloInstruction* padding = computation->AddInstruction(HloInstruction::CreateConstant( - MakeUnique<Literal>(LiteralUtil::Zero(element_type)))); + MakeUnique<Literal>(Literal::Zero(element_type)))); return computation->AddInstruction(HloInstruction::CreatePad( ShapeInference::InferPadShape( /*operand_shape=*/kernel->shape(), @@ -241,9 +241,9 @@ bool PadInsertion::CanonicalizeBackwardFilterConvolution( // Create a new backward convolution replacing the old one. HloComputation* computation = backward_conv->parent(); HloInstruction* output = backward_conv->mutable_operand(1); - HloInstruction* padding = computation->AddInstruction( - HloInstruction::CreateConstant(MakeUnique<Literal>( - LiteralUtil::Zero(input->shape().element_type())))); + HloInstruction* padding = + computation->AddInstruction(HloInstruction::CreateConstant( + MakeUnique<Literal>(Literal::Zero(input->shape().element_type())))); HloInstruction* padded_input = computation->AddInstruction(HloInstruction::CreatePad( ShapeInference::InferPadShape(input->shape(), padding->shape(), diff --git a/tensorflow/compiler/xla/service/gpu/while_transformer.cc b/tensorflow/compiler/xla/service/gpu/while_transformer.cc index 06b01d311d..3034ed06b7 100644 --- a/tensorflow/compiler/xla/service/gpu/while_transformer.cc +++ b/tensorflow/compiler/xla/service/gpu/while_transformer.cc @@ -37,8 +37,8 @@ namespace { // patterns to match. // // Each ExprTree node is comprised of an HloOpcode, and a set of operands (each -// of type ExprTree). Operands can be added by specifying the index and HloOpcode -// of the operand. +// of type ExprTree). 
Operands can be added by specifying the index and +// HloOpcode of the operand. // // For example, the following computation: // @@ -197,10 +197,9 @@ class MatcherBase { return InvalidArgument("Must use S32 or S64 integral types."); } if (type == S32) { - *const_value = - static_cast<int64>(LiteralUtil::GetFirstElement<int32>(literal)); + *const_value = static_cast<int64>(literal.GetFirstElement<int32>()); } else if (type == S64) { - *const_value = LiteralUtil::GetFirstElement<int64>(literal); + *const_value = literal.GetFirstElement<int64>(); } return tensorflow::Status::OK(); } diff --git a/tensorflow/compiler/xla/service/gpu/while_transformer_test.cc b/tensorflow/compiler/xla/service/gpu/while_transformer_test.cc index e82491fd6f..51d38f8421 100644 --- a/tensorflow/compiler/xla/service/gpu/while_transformer_test.cc +++ b/tensorflow/compiler/xla/service/gpu/while_transformer_test.cc @@ -41,7 +41,7 @@ class WhileTransformerTest : public HloTestBase { const int64 tuple_index, const int64 limit) { auto builder = HloComputation::Builder(TestName() + ".Condition"); auto limit_const = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(limit))); + HloInstruction::CreateConstant(Literal::CreateR0<int32>(limit))); auto loop_state = builder.AddInstruction( HloInstruction::CreateParameter(0, loop_state_shape_, "loop_state")); auto induction_variable = @@ -64,8 +64,8 @@ class WhileTransformerTest : public HloTestBase { auto induction_variable = builder.AddInstruction(HloInstruction::CreateGetTupleElement( induction_variable_shape_, loop_state, ind_var_tuple_index)); - auto inc = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR0<int32>(increment))); + auto inc = builder.AddInstruction( + HloInstruction::CreateConstant(Literal::CreateR0<int32>(increment))); auto add0 = builder.AddInstruction(HloInstruction::CreateBinary( induction_variable->shape(), HloOpcode::kAdd, induction_variable, inc)); // Update data GTE(data_tuple_index). @@ -88,12 +88,10 @@ class WhileTransformerTest : public HloTestBase { const int64 ind_var_tuple_index, const int64 ind_var_init) { auto builder = HloComputation::Builder(TestName() + ".While"); - auto induction_var_init = - builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR0<int32>(ind_var_init))); - auto data_init = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>( - {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}))); + auto induction_var_init = builder.AddInstruction( + HloInstruction::CreateConstant(Literal::CreateR0<int32>(ind_var_init))); + auto data_init = builder.AddInstruction(HloInstruction::CreateConstant( + Literal::CreateR1<float>({0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}))); auto loop_state_init = ind_var_tuple_index == 0 ? 
builder.AddInstruction( diff --git a/tensorflow/compiler/xla/service/gpu_transfer_manager.cc b/tensorflow/compiler/xla/service/gpu_transfer_manager.cc index 3e4e590d3b..cd1b182b22 100644 --- a/tensorflow/compiler/xla/service/gpu_transfer_manager.cc +++ b/tensorflow/compiler/xla/service/gpu_transfer_manager.cc @@ -113,8 +113,7 @@ GpuTransferManager::TransferLiteralToInfeedInternal( ShapeUtil::HumanString(literal.shape()).c_str()); } - return TransferBufferToInfeedInternal(executor, size, - LiteralUtil::InternalData(literal)); + return TransferBufferToInfeedInternal(executor, size, literal.InternalData()); } StatusOr<gpu::InfeedBuffer*> GpuTransferManager::TransferBufferToInfeedInternal( diff --git a/tensorflow/compiler/xla/service/graphviz_example.cc b/tensorflow/compiler/xla/service/graphviz_example.cc index cd00a41a03..d194b3a310 100644 --- a/tensorflow/compiler/xla/service/graphviz_example.cc +++ b/tensorflow/compiler/xla/service/graphviz_example.cc @@ -47,7 +47,7 @@ HloComputation* AddScalarConstantComputation(int64 addend, HloModule* module) { auto x_value = builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeShape(F32, {}), "x_value")); auto half = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.5))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(0.5))); builder.AddInstruction(HloInstruction::CreateBinary( half->shape(), HloOpcode::kAdd, x_value, half)); return module->AddEmbeddedComputation(builder.Build()); @@ -118,7 +118,7 @@ std::unique_ptr<HloModule> MakeBigGraph() { auto rng = builder.AddInstruction( HloInstruction::CreateRng(vshape, RNG_UNIFORM, {param_m, param_m})); auto one = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto add_computation = ScalarSumComputation(module.get()); builder.AddInstruction( HloInstruction::CreateReduce(vshape, rng, one, {1}, add_computation)); diff --git a/tensorflow/compiler/xla/service/heap_simulator_test.cc b/tensorflow/compiler/xla/service/heap_simulator_test.cc index 60a0768a86..fefc4c6a0f 100644 --- a/tensorflow/compiler/xla/service/heap_simulator_test.cc +++ b/tensorflow/compiler/xla/service/heap_simulator_test.cc @@ -173,7 +173,7 @@ class HeapSimulatorTest : public HloTestBase { TEST_F(HeapSimulatorTest, ScalarConstant) { auto builder = HloComputation::Builder(TestName()); auto const0 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); // Constants aren't assigned. See b/32248867 HeapSimulatorTracker tracker(TestName(), builder.Build(), {const0}); diff --git a/tensorflow/compiler/xla/service/hlo_alias_analysis_test.cc b/tensorflow/compiler/xla/service/hlo_alias_analysis_test.cc index 24c467d411..a869689b46 100644 --- a/tensorflow/compiler/xla/service/hlo_alias_analysis_test.cc +++ b/tensorflow/compiler/xla/service/hlo_alias_analysis_test.cc @@ -87,9 +87,9 @@ TEST_F(HloAliasAnalysisTest, BinaryOperation) { // Test the analysis on a single binary operation (Add). 
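Non-factory helpers migrate the same way, as in the pad-insertion and GPU elemental IR emitter hunks further up: Literal::Zero stays a static (it only needs a PrimitiveType), while predicates such as IsAllFloat become member queries on the literal. A small sketch borrowing names from those hunks (computation, element_type, and operand are assumed from that context):

// Static helper: a scalar zero of the requested element type.
HloInstruction* padding = computation->AddInstruction(HloInstruction::CreateConstant(
    MakeUnique<Literal>(Literal::Zero(element_type))));

// Member predicate: does this constant hold the given float value everywhere?
bool is_zero_constant = operand->opcode() == HloOpcode::kConstant &&
                        operand->literal().IsAllFloat(0.0f);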
auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); auto add = builder.AddInstruction(HloInstruction::CreateBinary( scalar_shape_, HloOpcode::kAdd, constant1, constant2)); module_.AddEntryComputation(builder.Build()); @@ -196,9 +196,9 @@ TEST_F(HloAliasAnalysisTest, SingleCall) { auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); auto call = builder.AddInstruction(HloInstruction::CreateCall( scalar_shape_, {constant1, constant2}, called_computation)); module_.AddEntryComputation(builder.Build()); @@ -233,9 +233,9 @@ TEST_F(HloAliasAnalysisTest, ComputationCalledTwice) { auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); auto call1 = builder.AddInstruction(HloInstruction::CreateCall( scalar_shape_, {constant1, constant2}, called_computation)); auto call2 = builder.AddInstruction(HloInstruction::CreateCall( @@ -310,15 +310,15 @@ TEST_F(HloAliasAnalysisTest, SingleWhile) { auto cond_param = cond_builder.AddInstruction( HloInstruction::CreateParameter(0, tuple_shape, "param")); cond_builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))); + HloInstruction::CreateConstant(Literal::CreateR0<bool>(false))); HloComputation* condition = module_.AddEmbeddedComputation(cond_builder.Build()); auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); auto tuple = builder.AddInstruction( HloInstruction::CreateTuple({constant1, constant2})); auto xla_while = builder.AddInstruction( @@ -398,15 +398,15 @@ TEST_F(HloAliasAnalysisTest, SequentialWhiles) { cond_builder.AddInstruction( HloInstruction::CreateParameter(0, tuple_shape, "param")); cond_builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))); + HloInstruction::CreateConstant(Literal::CreateR0<bool>(false))); HloComputation* condition = module_.AddEmbeddedComputation(cond_builder.Build()); auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( - 
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); auto tuple = builder.AddInstruction( HloInstruction::CreateTuple({constant1, constant2})); auto xla_while0 = builder.AddInstruction( @@ -453,7 +453,7 @@ TEST_F(HloAliasAnalysisTest, NestedWhiles) { cond_builder.AddInstruction( HloInstruction::CreateParameter(0, tuple_shape, "param")); cond_builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))); + HloInstruction::CreateConstant(Literal::CreateR0<bool>(false))); HloComputation* condition = module_.AddEmbeddedComputation(cond_builder.Build()); @@ -491,9 +491,9 @@ TEST_F(HloAliasAnalysisTest, NestedWhiles) { auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); auto tuple = builder.AddInstruction( HloInstruction::CreateTuple({constant1, constant2})); auto entry_while = builder.AddInstruction( @@ -554,17 +554,17 @@ TEST_F(HloAliasAnalysisTest, SwizzlingWhile) { cond_builder.AddInstruction( HloInstruction::CreateParameter(0, tuple_shape, "param")); auto cond_constant = cond_builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))); + HloInstruction::CreateConstant(Literal::CreateR0<bool>(false))); HloComputation* condition = module_.AddEmbeddedComputation(cond_builder.Build()); auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); auto constant3 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(3.0))); auto tuple = builder.AddInstruction( HloInstruction::CreateTuple({constant1, constant2, constant3})); auto xla_while = builder.AddInstruction( @@ -600,15 +600,15 @@ TEST_F(HloAliasAnalysisTest, TupleSelect) { // instruction. 
auto builder = HloComputation::Builder(TestName()); auto pred = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))); + HloInstruction::CreateConstant(Literal::CreateR0<bool>(false))); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); auto constant3 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(3.0))); auto constant4 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(4.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(4.0))); auto tuple1 = builder.AddInstruction(HloInstruction::CreateTuple({constant1})); auto tuple2 = @@ -694,16 +694,16 @@ TEST_F(HloAliasAnalysisTest, TupleSelectToWhile) { auto cond_param = cond_builder.AddInstruction( HloInstruction::CreateParameter(0, tuple_shape, "param")); cond_builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))); + HloInstruction::CreateConstant(Literal::CreateR0<bool>(false))); HloComputation* condition = module_.AddEmbeddedComputation(cond_builder.Build()); auto pred = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))); + HloInstruction::CreateConstant(Literal::CreateR0<bool>(false))); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); auto tuple1 = builder.AddInstruction(HloInstruction::CreateTuple({constant1})); auto tuple2 = @@ -742,7 +742,7 @@ TEST_F(HloAliasAnalysisTest, Bitcast) { // Bitcasting a value should not produce a new buffer. auto builder = HloComputation::Builder(TestName()); auto constant = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto bitcast = builder.AddInstruction(HloInstruction::CreateUnary( scalar_shape_, HloOpcode::kBitcast, constant)); diff --git a/tensorflow/compiler/xla/service/hlo_computation_test.cc b/tensorflow/compiler/xla/service/hlo_computation_test.cc index 5d49c83e2d..057d1ce09b 100644 --- a/tensorflow/compiler/xla/service/hlo_computation_test.cc +++ b/tensorflow/compiler/xla/service/hlo_computation_test.cc @@ -110,7 +110,7 @@ TEST_F(HloComputationTest, PostOrderSingleton) { // Test GetInstructionPostOrder for a computation with one instruction. auto builder = HloComputation::Builder(TestName()); auto constant = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f))); auto computation = builder.Build(); EXPECT_THAT(computation->MakeInstructionPostOrder(), ElementsAre(constant)); @@ -121,7 +121,7 @@ TEST_F(HloComputationTest, PostOrderSimple) { // instructions. 
auto builder = HloComputation::Builder(TestName()); auto constant = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f))); auto negate1 = builder.AddInstruction( HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, constant)); auto negate2 = builder.AddInstruction( @@ -136,7 +136,7 @@ TEST_F(HloComputationTest, PostOrderTrace) { // Test GetInstructionPostOrder for a computation with a trace instruction. auto builder = HloComputation::Builder(TestName()); auto constant = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f))); auto negate1 = builder.AddInstruction( HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, constant)); auto trace = @@ -155,13 +155,13 @@ TEST_F(HloComputationTest, PostOrderDisconnectedInstructions) { // which are not connected. auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f))); auto constant3 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f))); auto constant4 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f))); auto computation = builder.Build(); EXPECT_THAT(computation->MakeInstructionPostOrder(), @@ -173,11 +173,11 @@ TEST_F(HloComputationTest, PostOrderWithMultipleRoots) { // which are not connected. auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f))); auto constant3 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f))); auto add1 = builder.AddInstruction(HloInstruction::CreateBinary( r0f32_, HloOpcode::kAdd, constant1, constant2)); auto add2 = builder.AddInstruction(HloInstruction::CreateBinary( @@ -197,11 +197,11 @@ TEST_F(HloComputationTest, VisitWithMultipleRoots) { // computation has multiple roots (dead code). auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f))); auto constant3 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f))); // Add three disconnected add expressions. 
builder.AddInstruction(HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, constant1, constant2)); @@ -248,7 +248,7 @@ TEST_F(HloComputationTest, DeepCopyArray) { // Test that DeepCopyInstruction properly copies an array. auto builder = HloComputation::Builder(TestName()); auto constant = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0}))); + Literal::CreateR1<float>({1.0, 2.0, 3.0}))); auto computation = builder.Build(); auto copy = computation->DeepCopyInstruction(constant).ValueOrDie(); @@ -260,9 +260,9 @@ TEST_F(HloComputationTest, DeepCopyTuple) { // Test that DeepCopyInstruction properly copies a tuple. auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0}))); + Literal::CreateR1<float>({1.0, 2.0, 3.0}))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0))); auto tuple = builder.AddInstruction( HloInstruction::CreateTuple({constant1, constant2})); @@ -280,7 +280,7 @@ TEST_F(HloComputationTest, CycleDetection) { // Test whether the visitor can detect cycles in the graph. auto builder = HloComputation::Builder(TestName()); auto constant = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f))); auto negate = builder.AddInstruction( HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, constant)); auto add = builder.AddInstruction( @@ -303,7 +303,7 @@ TEST_F(HloComputationTest, RemoveInstructionWithDuplicateOperand) { // twice. auto builder = HloComputation::Builder(TestName()); auto constant = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f))); auto dead_negate = builder.AddInstruction( HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, constant)); auto dead_add = builder.AddInstruction(HloInstruction::CreateBinary( @@ -326,9 +326,9 @@ TEST_F(HloComputationTest, RemoveInstructionWithDuplicateOperand) { TEST_F(HloComputationTest, CloneWithControlDependency) { auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0f))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0f))); auto add = builder.AddInstruction(HloInstruction::CreateBinary( r0f32_, HloOpcode::kAdd, constant1, constant2)); diff --git a/tensorflow/compiler/xla/service/hlo_constant_folding_test.cc b/tensorflow/compiler/xla/service/hlo_constant_folding_test.cc index 04ab02995b..a643bc4076 100644 --- a/tensorflow/compiler/xla/service/hlo_constant_folding_test.cc +++ b/tensorflow/compiler/xla/service/hlo_constant_folding_test.cc @@ -41,7 +41,7 @@ using HloConstantFoldingTest = HloTestBase; TEST_F(HloConstantFoldingTest, ConvertF32ToS64) { HloComputation::Builder builder(TestName()); HloInstruction* input = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f))); builder.AddInstruction( 
HloInstruction::CreateConvert(ShapeUtil::MakeShape(S64, {}), input)); @@ -55,15 +55,14 @@ TEST_F(HloConstantFoldingTest, ConvertF32ToS64) { EXPECT_TRUE(result); EXPECT_THAT(computation->root_instruction(), op::Constant()); - EXPECT_EQ(LiteralUtil::GetFirstElement<int64>( - computation->root_instruction()->literal()), + EXPECT_EQ(computation->root_instruction()->literal().GetFirstElement<int64>(), 42); } TEST_F(HloConstantFoldingTest, ConvertS64ToF32) { HloComputation::Builder builder(TestName()); HloInstruction* input = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<int64>(42))); + HloInstruction::CreateConstant(Literal::CreateR0<int64>(42))); builder.AddInstruction( HloInstruction::CreateConvert(ShapeUtil::MakeShape(F32, {}), input)); @@ -77,15 +76,14 @@ TEST_F(HloConstantFoldingTest, ConvertS64ToF32) { EXPECT_TRUE(result); EXPECT_THAT(computation->root_instruction(), op::Constant()); - EXPECT_EQ(LiteralUtil::GetFirstElement<float>( - computation->root_instruction()->literal()), + EXPECT_EQ(computation->root_instruction()->literal().GetFirstElement<float>(), 42.0f); } TEST_F(HloConstantFoldingTest, ConvertF32ArrayToS64Array) { HloComputation::Builder builder(TestName()); - HloInstruction* input = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR1<float>({42.0f, 19.0f}))); + HloInstruction* input = builder.AddInstruction( + HloInstruction::CreateConstant(Literal::CreateR1<float>({42.0f, 19.0f}))); builder.AddInstruction( HloInstruction::CreateConvert(ShapeUtil::MakeShape(S64, {2}), input)); @@ -99,12 +97,8 @@ TEST_F(HloConstantFoldingTest, ConvertF32ArrayToS64Array) { EXPECT_TRUE(result); EXPECT_THAT(computation->root_instruction(), op::Constant()); - EXPECT_EQ( - LiteralUtil::Get<int64>(computation->root_instruction()->literal(), {0}), - 42); - EXPECT_EQ( - LiteralUtil::Get<int64>(computation->root_instruction()->literal(), {1}), - 19); + EXPECT_EQ(computation->root_instruction()->literal().Get<int64>({0}), 42); + EXPECT_EQ(computation->root_instruction()->literal().Get<int64>({1}), 19); } TEST_F(HloConstantFoldingTest, Concatenate) { @@ -126,7 +120,7 @@ TEST_F(HloConstantFoldingTest, Concatenate) { for (auto csize : test_config.concat_sizes) { dimensions[test_config.concat_dimension] = csize; concat_size += csize; - auto literal = LiteralUtil::CreateFromDimensions(F32, dimensions); + auto literal = Literal::CreateFromDimensions(F32, dimensions); HloInstruction* insn = builder.AddInstruction( HloInstruction::CreateConstant(std::move(literal))); operands.push_back(insn); @@ -179,7 +173,7 @@ TEST_F(HloConstantFoldingTest, TransposeConstantFold) { TF_ASSIGN_OR_ASSERT_OK(auto literal, LiteralTestUtil::CreateRandomLiteral<F32>( ShapeUtil::MakeShape(F32, dimensions), 0.0, 1.0)); - auto literal_clone = LiteralUtil::CloneToUnique(*literal); + auto literal_clone = literal->CloneToUnique(); HloInstruction* literal_instruction = builder.AddInstruction( HloInstruction::CreateConstant(std::move(literal))); Shape shape = ShapeUtil::MakeShape(F32, {8, 7, 11, 9, 5}); @@ -199,12 +193,10 @@ TEST_F(HloConstantFoldingTest, TransposeConstantFold) { using NativeT = typename primitive_util::PrimitiveTypeToNative<F32>::type; bool matched = true; - LiteralUtil::EachCell<NativeT>( - root->literal(), + root->literal().EachCell<NativeT>( [&](tensorflow::gtl::ArraySlice<int64> indices, NativeT value) { std::vector<int64> rindexes = Permute(permutation, indices); - matched = matched && (value == LiteralUtil::Get<NativeT>(*literal_clone, - rindexes)); 
+ matched = matched && (value == literal_clone->Get<NativeT>(rindexes)); }); EXPECT_TRUE(matched); } diff --git a/tensorflow/compiler/xla/service/hlo_cost_analysis_test.cc b/tensorflow/compiler/xla/service/hlo_cost_analysis_test.cc index b74c7eb4e0..5c71056bb5 100644 --- a/tensorflow/compiler/xla/service/hlo_cost_analysis_test.cc +++ b/tensorflow/compiler/xla/service/hlo_cost_analysis_test.cc @@ -342,11 +342,11 @@ TEST_F(FusionCostAnalysis, LoopFusion) { // mul = Mul(exp, C3) // sub = Sub(mul, clamp) // tuple = Tuple({sub, sub, mul, C1}) - auto c1 = HloInstruction::CreateConstant(LiteralUtil::CreateR2F32Linspace( + auto c1 = HloInstruction::CreateConstant(Literal::CreateR2F32Linspace( /*from=*/0.0f, /*to=*/1.0f, /*rows=*/2, /*cols=*/2)); - auto c2 = HloInstruction::CreateConstant(LiteralUtil::CreateR2F32Linspace( + auto c2 = HloInstruction::CreateConstant(Literal::CreateR2F32Linspace( /*from=*/1.0f, /*to=*/2.0f, /*rows=*/2, /*cols=*/2)); - auto c3 = HloInstruction::CreateConstant(LiteralUtil::CreateR2F32Linspace( + auto c3 = HloInstruction::CreateConstant(Literal::CreateR2F32Linspace( /*from=*/2.0f, /*to=*/3.0f, /*rows=*/2, /*cols=*/2)); auto add = @@ -383,9 +383,8 @@ TEST_F(FusionCostAnalysis, NoLayout) { shape_without_layout.clear_layout(); auto c1 = HloInstruction::CreateConstant( - LiteralUtil::CreateR4FromArray4D(Array4D<float>(2, 3, 4, 5))); - auto c2 = - HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>({1, 2, 3})); + Literal::CreateR4FromArray4D(Array4D<float>(2, 3, 4, 5))); + auto c2 = HloInstruction::CreateConstant(Literal::CreateR1<float>({1, 2, 3})); auto broadcast = HloInstruction::CreateBroadcast(shape_without_layout, c2.get(), {1}); diff --git a/tensorflow/compiler/xla/service/hlo_cse.cc b/tensorflow/compiler/xla/service/hlo_cse.cc index 4c6af5c40f..0fef89a06d 100644 --- a/tensorflow/compiler/xla/service/hlo_cse.cc +++ b/tensorflow/compiler/xla/service/hlo_cse.cc @@ -68,7 +68,7 @@ bool CombineConstants(HloComputation* computation, bool is_layout_sensitive) { auto range = constants.equal_range(shape_string); HloInstruction* match = nullptr; for (auto it = range.first; it != range.second; ++it) { - if (LiteralUtil::Equal(instruction->literal(), it->second->literal())) { + if (instruction->literal().Equal(it->second->literal())) { match = it->second; break; } diff --git a/tensorflow/compiler/xla/service/hlo_cse_test.cc b/tensorflow/compiler/xla/service/hlo_cse_test.cc index cc39c3ac20..8b0b9c8bbd 100644 --- a/tensorflow/compiler/xla/service/hlo_cse_test.cc +++ b/tensorflow/compiler/xla/service/hlo_cse_test.cc @@ -51,9 +51,9 @@ TEST_F(HloCseTest, CombineTwoConstants) { // Test that two identical constants are commoned. 
auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f))); builder.AddInstruction(HloInstruction::CreateBinary( constant1->shape(), HloOpcode::kAdd, constant1, constant2)); @@ -67,10 +67,10 @@ TEST_F(HloCseTest, CombineTwoConstants) { EXPECT_EQ(2, computation->instruction_count()); HloInstruction* constant = computation->instructions().begin()->get(); - EXPECT_EQ(42.0f, LiteralUtil::Get<float>(constant->literal(), {})); + EXPECT_EQ(42.0f, constant->literal().Get<float>({})); auto result = ExecuteAndTransfer(std::move(module), {}); - auto expected = LiteralUtil::CreateR0<float>(84.0); + auto expected = Literal::CreateR0<float>(84.0); LiteralTestUtil::ExpectNear(*expected, *result, ErrorSpec(1e-4)); } @@ -102,7 +102,7 @@ TEST_F(HloCseTest, CombineTwoConstantsDifferentLayoutsAndInsensitive) { EXPECT_THAT(add, op::Add(first_operand, first_operand)); auto result = ExecuteAndTransfer(std::move(module), {}); - auto expected = LiteralUtil::CreateR2<float>({{2.0, 4.0}, {6.0, 8.0}}); + auto expected = Literal::CreateR2<float>({{2.0, 4.0}, {6.0, 8.0}}); LiteralTestUtil::ExpectNear(*expected, *result, ErrorSpec(1e-4)); } @@ -132,7 +132,7 @@ TEST_F(HloCseTest, CombineTwoConstantsDifferentLayoutsAndSensitive) { EXPECT_THAT(add, op::Add(constant1, constant2)); auto result = ExecuteAndTransfer(std::move(module), {}); - auto expected = LiteralUtil::CreateR2<float>({{2.0, 4.0}, {6.0, 8.0}}); + auto expected = Literal::CreateR2<float>({{2.0, 4.0}, {6.0, 8.0}}); LiteralTestUtil::ExpectNear(*expected, *result, ErrorSpec(1e-4)); } @@ -141,20 +141,20 @@ TEST_F(HloCseTest, ConstantsSameValueDifferentType) { // commoned. auto builder = HloComputation::Builder(TestName()); builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<uint32>(42))); + HloInstruction::CreateConstant(Literal::CreateR0<uint32>(42))); builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(42))); + HloInstruction::CreateConstant(Literal::CreateR0<int32>(42))); builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<uint64>(42.0))); + HloInstruction::CreateConstant(Literal::CreateR0<uint64>(42.0))); builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<int64>(42.0))); + HloInstruction::CreateConstant(Literal::CreateR0<int64>(42.0))); builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<double>(42.0))); + HloInstruction::CreateConstant(Literal::CreateR0<double>(42.0))); builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f))); // Duplicate the float constant to verify something happens. builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f))); auto module = CreateNewModule(); auto computation = module->AddEntryComputation(builder.Build()); @@ -171,13 +171,13 @@ TEST_F(HloCseTest, NonscalarConstants) { // Test that identical nonscalar constants are merged. 
auto builder = HloComputation::Builder(TestName()); auto common_constant1 = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}))); + Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}))); auto common_constant2 = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}))); + Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}))); // Create a constant which has the same shape but a different value. auto uncommon_constant = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR2<float>({{2.0, 4.0}, {6.0, 8.0}}))); + Literal::CreateR2<float>({{2.0, 4.0}, {6.0, 8.0}}))); // Tie the constants together with a tuple. This makes it easier to refer to // the constant instructions via their use. @@ -206,7 +206,7 @@ TEST_F(HloCseTest, IdenticalInstructions) { // Test that three identical instructions are commoned. auto builder = HloComputation::Builder(TestName()); auto constant = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0))); auto exp1 = builder.AddInstruction(HloInstruction::CreateUnary( constant->shape(), HloOpcode::kExp, constant)); auto exp2 = builder.AddInstruction(HloInstruction::CreateUnary( @@ -236,7 +236,7 @@ TEST_F(HloCseTest, IdenticalInstructionsDifferentLayoutsSensitive) { // commoned if the pass is layout sensitive. auto builder = HloComputation::Builder(TestName()); auto constant = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}))); + Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}))); auto exp1 = builder.AddInstruction(HloInstruction::CreateUnary( constant->shape(), HloOpcode::kExp, constant)); @@ -267,7 +267,7 @@ TEST_F(HloCseTest, IdenticalInstructionsDifferentLayoutsInsensitive) { // the pass is layout insensitive. auto builder = HloComputation::Builder(TestName()); auto constant = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}))); + Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}))); auto exp1 = builder.AddInstruction(HloInstruction::CreateUnary( constant->shape(), HloOpcode::kExp, constant)); @@ -311,7 +311,7 @@ TEST_F(HloCseTest, IdenticalExpressions) { // The *1 instructions should be merged with the *2 instructions. auto builder = HloComputation::Builder(TestName()); auto constant = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0))); auto negate1 = builder.AddInstruction(HloInstruction::CreateUnary( constant->shape(), HloOpcode::kNegate, constant)); @@ -349,9 +349,9 @@ TEST_F(HloCseTest, DoNotCombineRng) { // Test that two RNG ops are not commoned. 
auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0f))); auto rng1 = builder.AddInstruction(HloInstruction::CreateRng( ShapeUtil::MakeShape(F32, {}), RandomDistribution::RNG_UNIFORM, {constant1, constant2})); @@ -392,9 +392,9 @@ TEST_F(HloCseTest, DISABLED_DoNotCombineCallsToImpureFunctions) { Shape scalar_shape = ShapeUtil::MakeShape(F32, {}); auto builder = HloComputation::Builder(TestName() + "_rng_fun"); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0f))); auto rng = builder.AddInstruction(HloInstruction::CreateRng( scalar_shape, RandomDistribution::RNG_UNIFORM, {constant1, constant2})); auto param = builder.AddInstruction(HloInstruction::CreateParameter( @@ -409,7 +409,7 @@ TEST_F(HloCseTest, DISABLED_DoNotCombineCallsToImpureFunctions) { { auto builder = HloComputation::Builder(TestName()); auto constant = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>({5.0f}))); + HloInstruction::CreateConstant(Literal::CreateR1<float>({5.0f}))); auto rng1 = builder.AddInstruction( HloInstruction::CreateMap(constant->shape(), {constant}, rng_function)); auto rng2 = builder.AddInstruction( diff --git a/tensorflow/compiler/xla/service/hlo_dataflow_analysis_test.cc b/tensorflow/compiler/xla/service/hlo_dataflow_analysis_test.cc index 21344af5f2..a97620cd0d 100644 --- a/tensorflow/compiler/xla/service/hlo_dataflow_analysis_test.cc +++ b/tensorflow/compiler/xla/service/hlo_dataflow_analysis_test.cc @@ -73,9 +73,9 @@ TEST_P(HloDataflowAnalysisTest, BinaryOperation) { // Test the dataflow for a simple binary operation (Add). 
auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); auto add = builder.AddInstruction(HloInstruction::CreateBinary( scalar_shape_, HloOpcode::kAdd, constant1, constant2)); module_.AddEntryComputation(builder.Build()); @@ -176,9 +176,9 @@ TEST_P(HloDataflowAnalysisTest, NestedTuple) { // auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); auto tuple = builder.AddInstruction( HloInstruction::CreateTuple({constant1, constant2})); auto nested_tuple = builder.AddInstruction( @@ -240,9 +240,9 @@ TEST_P(HloDataflowAnalysisTest, SingleCall) { auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); auto call = builder.AddInstruction(HloInstruction::CreateCall( scalar_shape_, {constant1, constant2}, called_computation)); module_.AddEntryComputation(builder.Build()); @@ -289,9 +289,9 @@ TEST_P(HloDataflowAnalysisTest, ComputationCalledTwiceWithSameArguments) { auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); auto call1 = builder.AddInstruction(HloInstruction::CreateCall( scalar_shape_, {constant1, constant2}, called_computation)); auto call2 = builder.AddInstruction(HloInstruction::CreateCall( @@ -343,9 +343,9 @@ TEST_P(HloDataflowAnalysisTest, ComputationCalledTwiceWithDifferentArguments) { auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); auto call1 = builder.AddInstruction(HloInstruction::CreateCall( scalar_shape_, {constant1, constant2}, called_computation)); auto call2 = builder.AddInstruction(HloInstruction::CreateCall( @@ -407,9 +407,9 @@ TEST_P(HloDataflowAnalysisTest, NestedCalls) { auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( - 
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); auto call = builder.AddInstruction(HloInstruction::CreateCall( scalar_shape_, {constant1, constant2}, outer_computation)); module_.AddEntryComputation(builder.Build()); @@ -474,15 +474,15 @@ TEST_P(HloDataflowAnalysisTest, SingleWhile) { auto cond_param = cond_builder.AddInstruction( HloInstruction::CreateParameter(0, tuple_shape, "param")); cond_builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))); + HloInstruction::CreateConstant(Literal::CreateR0<bool>(false))); HloComputation* condition = module_.AddEmbeddedComputation(cond_builder.Build()); auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); auto tuple = builder.AddInstruction( HloInstruction::CreateTuple({constant1, constant2})); auto xla_while = builder.AddInstruction( @@ -571,15 +571,15 @@ TEST_P(HloDataflowAnalysisTest, SequentialWhiles) { cond_builder.AddInstruction( HloInstruction::CreateParameter(0, tuple_shape, "param")); cond_builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))); + HloInstruction::CreateConstant(Literal::CreateR0<bool>(false))); HloComputation* condition = module_.AddEmbeddedComputation(cond_builder.Build()); auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); auto tuple = builder.AddInstruction( HloInstruction::CreateTuple({constant1, constant2})); auto xla_while0 = builder.AddInstruction( @@ -630,7 +630,7 @@ TEST_P(HloDataflowAnalysisTest, NestedWhiles) { cond_builder.AddInstruction( HloInstruction::CreateParameter(0, tuple_shape, "param")); cond_builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))); + HloInstruction::CreateConstant(Literal::CreateR0<bool>(false))); HloComputation* condition = module_.AddEmbeddedComputation(cond_builder.Build()); @@ -668,9 +668,9 @@ TEST_P(HloDataflowAnalysisTest, NestedWhiles) { auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); auto tuple = builder.AddInstruction( HloInstruction::CreateTuple({constant1, constant2})); auto entry_while = builder.AddInstruction( @@ -757,15 +757,15 @@ TEST_P(HloDataflowAnalysisTest, SwizzlingWhile) { auto cond_param = cond_builder.AddInstruction( HloInstruction::CreateParameter(0, tuple_shape, "param")); cond_builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))); + 
HloInstruction::CreateConstant(Literal::CreateR0<bool>(false))); HloComputation* condition = module_.AddEmbeddedComputation(cond_builder.Build()); auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); auto tuple = builder.AddInstruction( HloInstruction::CreateTuple({constant1, constant2})); auto xla_while = builder.AddInstruction( @@ -817,11 +817,11 @@ TEST_P(HloDataflowAnalysisTest, ArraySelect) { // Test a kSelect of an array value. auto builder = HloComputation::Builder(TestName()); auto pred = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))); + HloInstruction::CreateConstant(Literal::CreateR0<bool>(false))); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); auto select = builder.AddInstruction(HloInstruction::CreateTernary( scalar_shape_, HloOpcode::kSelect, pred, constant1, constant2)); @@ -841,15 +841,15 @@ TEST_P(HloDataflowAnalysisTest, TupleSelect) { // instruction. auto builder = HloComputation::Builder(TestName()); auto pred = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))); + HloInstruction::CreateConstant(Literal::CreateR0<bool>(false))); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); auto constant3 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(3.0))); auto constant4 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(4.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(4.0))); auto tuple1 = builder.AddInstruction(HloInstruction::CreateTuple({constant1})); auto tuple2 = @@ -913,17 +913,17 @@ TEST_P(HloDataflowAnalysisTest, NestedTupleSelect) { // Test kSelect of a nested tuple. 
auto builder = HloComputation::Builder(TestName()); auto pred = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))); + HloInstruction::CreateConstant(Literal::CreateR0<bool>(false))); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); auto constant3 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(3.0))); auto constant4 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(4.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(4.0))); auto constant5 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(5.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(5.0))); auto inner_tuple1 = builder.AddInstruction( HloInstruction::CreateTuple({constant2, constant3})); auto tuple1 = builder.AddInstruction( @@ -999,18 +999,18 @@ TEST_P(HloDataflowAnalysisTest, TupleSelectToWhile) { cond_builder.AddInstruction( HloInstruction::CreateParameter(0, tuple_shape, "param")); cond_builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))); + HloInstruction::CreateConstant(Literal::CreateR0<bool>(false))); HloComputation* condition = module_.AddEmbeddedComputation(cond_builder.Build()); auto pred = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))); + HloInstruction::CreateConstant(Literal::CreateR0<bool>(false))); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); auto constant3 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(3.0))); auto tuple1 = builder.AddInstruction(HloInstruction::CreateTuple({constant1})); auto tuple2 = @@ -1062,7 +1062,7 @@ TEST_P(HloDataflowAnalysisTest, BitcastDefinesValue) { // Test the bitcast_defines_value flag to the dataflow analysis. auto builder = HloComputation::Builder(TestName()); auto constant = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto bitcast = builder.AddInstruction(HloInstruction::CreateUnary( scalar_shape_, HloOpcode::kBitcast, constant)); diff --git a/tensorflow/compiler/xla/service/hlo_dce_test.cc b/tensorflow/compiler/xla/service/hlo_dce_test.cc index 10cd7ca7c0..704b8dfca7 100644 --- a/tensorflow/compiler/xla/service/hlo_dce_test.cc +++ b/tensorflow/compiler/xla/service/hlo_dce_test.cc @@ -45,9 +45,9 @@ TEST_F(HloDceTest, NoDeadCode) { // Verify that no dead code is removed from a computation with no dead code. 
auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(123.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(123.0f))); builder.AddInstruction(HloInstruction::CreateBinary( constant1->shape(), HloOpcode::kAdd, constant1, constant2)); @@ -98,9 +98,9 @@ TEST_F(HloDceTest, ControlDependencies) { // Verify that instructions with control dependencies are not removed. auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(123.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(123.0f))); // Create two dead instructions: a negate and an add. auto dead_negate = builder.AddInstruction(HloInstruction::CreateUnary( diff --git a/tensorflow/compiler/xla/service/hlo_evaluator.cc b/tensorflow/compiler/xla/service/hlo_evaluator.cc index 48eefd92ae..a42289590b 100644 --- a/tensorflow/compiler/xla/service/hlo_evaluator.cc +++ b/tensorflow/compiler/xla/service/hlo_evaluator.cc @@ -89,11 +89,11 @@ StatusOr<std::unique_ptr<Literal>> Compare(const Shape& shape, HloOpcode opcode, << HloOpcodeString(opcode); } - auto result = LiteralUtil::CreateFromShape(shape); - TF_RETURN_IF_ERROR(LiteralUtil::Populate<bool>( - result.get(), [&](tensorflow::gtl::ArraySlice<int64> multi_index) { - return compare_op(LiteralUtil::Get<OperandT>(lhs_literal, multi_index), - LiteralUtil::Get<OperandT>(rhs_literal, multi_index)); + auto result = Literal::CreateFromShape(shape); + TF_RETURN_IF_ERROR(result.get()->Populate<bool>( + [&](tensorflow::gtl::ArraySlice<int64> multi_index) { + return compare_op(lhs_literal.Get<OperandT>(multi_index), + rhs_literal.Get<OperandT>(multi_index)); })); return std::move(result); @@ -117,12 +117,11 @@ StatusOr<std::unique_ptr<Literal>> ElementWiseUnaryOpImpl( ShapeUtil::HumanString(operand->shape()).c_str()); } - auto result = LiteralUtil::CreateFromShape(shape); + auto result = Literal::CreateFromShape(shape); - TF_RETURN_IF_ERROR(LiteralUtil::Populate<ReturnT>( - result.get(), [&](tensorflow::gtl::ArraySlice<int64> multi_index) { - return unary_op( - LiteralUtil::Get<NativeT>(operand_literal, multi_index)); + TF_RETURN_IF_ERROR(result.get()->Populate<ReturnT>( + [&](tensorflow::gtl::ArraySlice<int64> multi_index) { + return unary_op(operand_literal.Get<NativeT>(multi_index)); })); return std::move(result); } @@ -170,19 +169,18 @@ class HloEvaluator::TypedVisitor : public DfsHloVisitorWithDefault { Status HandleBroadcast(HloInstruction* broadcast) override { parent_->evaluated_[broadcast] = - LiteralUtil::CreateFromShape(broadcast->shape()); + Literal::CreateFromShape(broadcast->shape()); auto output = parent_->evaluated_[broadcast].get(); auto operand_to_broadcast = parent_->GetEvaluatedLiteralFor(broadcast->operand(0)); std::vector<int64> broadcast_indices( ShapeUtil::Rank(broadcast->operand(0)->shape()), 0); - return LiteralUtil::Populate<ReturnT>( - output, [&](tensorflow::gtl::ArraySlice<int64> multi_index) { + return output->Populate<ReturnT>( + [&](tensorflow::gtl::ArraySlice<int64> multi_index) { for (int64 i = 
0; i < broadcast->dimensions().size(); ++i) { broadcast_indices[i] = multi_index[broadcast->dimensions(i)]; } - return LiteralUtil::Get<ReturnT>(operand_to_broadcast, - broadcast_indices); + return operand_to_broadcast.Get<ReturnT>(broadcast_indices); }); } @@ -205,10 +203,9 @@ class HloEvaluator::TypedVisitor : public DfsHloVisitorWithDefault { template <PrimitiveType src_type, PrimitiveType dest_type> std::unique_ptr<Literal> ConvertIfTypesMatch(const Literal& src_literal) { DCHECK_EQ(src_type, src_literal.shape().element_type()); - return LiteralUtil::Convert< + return src_literal.Convert< typename primitive_util::PrimitiveTypeToNative<src_type>::type, - typename primitive_util::PrimitiveTypeToNative<dest_type>::type>( - src_literal); + typename primitive_util::PrimitiveTypeToNative<dest_type>::type>(); } Status HandleConvert(HloInstruction* convert, @@ -218,9 +215,9 @@ class HloEvaluator::TypedVisitor : public DfsHloVisitorWithDefault { switch (operand->shape().element_type()) { #define CONVERT_IF_TYPES_MATCH(src_type) \ case (src_type): \ - parent_->evaluated_[convert] = LiteralUtil::Convert< \ + parent_->evaluated_[convert] = operand_literal.Convert< \ typename primitive_util::PrimitiveTypeToNative<src_type>::type, \ - ReturnT>(operand_literal); \ + ReturnT>(); \ break; CONVERT_IF_TYPES_MATCH(PRED) CONVERT_IF_TYPES_MATCH(S8) @@ -464,12 +461,12 @@ class HloEvaluator::TypedVisitor : public DfsHloVisitorWithDefault { const Literal& lhs_literal = parent_->GetEvaluatedLiteralFor(lhs); const Literal& rhs_literal = parent_->GetEvaluatedLiteralFor(rhs); - auto result = LiteralUtil::CreateFromShape(shape); + auto result = Literal::CreateFromShape(shape); - TF_RETURN_IF_ERROR(LiteralUtil::Populate<ReturnT>( - result.get(), [&](tensorflow::gtl::ArraySlice<int64> multi_index) { - return binary_op(LiteralUtil::Get<ReturnT>(lhs_literal, multi_index), - LiteralUtil::Get<ReturnT>(rhs_literal, multi_index)); + TF_RETURN_IF_ERROR(result.get()->Populate<ReturnT>( + [&](tensorflow::gtl::ArraySlice<int64> multi_index) { + return binary_op(lhs_literal.Get<ReturnT>(multi_index), + rhs_literal.Get<ReturnT>(multi_index)); })); return std::move(result); } @@ -501,14 +498,13 @@ class HloEvaluator::TypedVisitor : public DfsHloVisitorWithDefault { const Literal& rhs_literal = parent_->GetEvaluatedLiteralFor(rhs); const Literal& ehs_literal = parent_->GetEvaluatedLiteralFor(ehs); - auto result = LiteralUtil::CreateFromShape(shape); + auto result = Literal::CreateFromShape(shape); - TF_RETURN_IF_ERROR(LiteralUtil::Populate<ReturnT>( - result.get(), [&](tensorflow::gtl::ArraySlice<int64> multi_index) { - return ternary_op( - LiteralUtil::Get<LhsType>(lhs_literal, multi_index), - LiteralUtil::Get<RhsType>(rhs_literal, multi_index), - LiteralUtil::Get<EhsType>(ehs_literal, multi_index)); + TF_RETURN_IF_ERROR(result.get()->Populate<ReturnT>( + [&](tensorflow::gtl::ArraySlice<int64> multi_index) { + return ternary_op(lhs_literal.Get<LhsType>(multi_index), + rhs_literal.Get<RhsType>(multi_index), + ehs_literal.Get<EhsType>(multi_index)); })); return std::move(result); @@ -570,7 +566,7 @@ StatusOr<std::unique_ptr<Literal>> HloEvaluator::Evaluate( if (operand->opcode() == HloOpcode::kParameter) { const Literal* input_literal = arg_literals_[operand->parameter_number()]; VLOG(2) << "Parameter operand evaluated to: " - << LiteralUtil::ToString(*input_literal); + << input_literal->ToString(); TF_RET_CHECK(ShapeUtil::Equal(operand->shape(), input_literal->shape())); evaluated_[operand] = MakeUnique<Literal>(*input_literal); 
@@ -607,8 +603,7 @@ std::unique_ptr<Literal> HloEvaluator::TryEvaluate( Status HloEvaluator::HandleParameter(HloInstruction* parameter) { VLOG(2) << "HandleParameter: " << parameter->ToString(); const Literal* input_literal = arg_literals_[parameter->parameter_number()]; - VLOG(2) << "Parameter evaluated to: " - << LiteralUtil::ToString(*input_literal); + VLOG(2) << "Parameter evaluated to: " << input_literal->ToString(); DCHECK(ShapeUtil::Equal(parameter->shape(), input_literal->shape())); evaluated_[parameter] = MakeUnique<Literal>(*input_literal); @@ -624,14 +619,14 @@ Status HloEvaluator::HandleConstant(HloInstruction* constant, Status HloEvaluator::HandleReshape(HloInstruction* reshape) { TF_ASSIGN_OR_RETURN( evaluated_[reshape], - LiteralUtil::Reshape(GetEvaluatedLiteralFor(reshape->operand(0)), - AsInt64Slice(reshape->shape().dimensions()))); + GetEvaluatedLiteralFor(reshape->operand(0)) + .Reshape(AsInt64Slice(reshape->shape().dimensions()))); return Status::OK(); } Status HloEvaluator::HandleTranspose(HloInstruction* transpose) { - evaluated_[transpose] = LiteralUtil::Transpose( - GetEvaluatedLiteralFor(transpose->operand(0)), transpose->dimensions()); + evaluated_[transpose] = GetEvaluatedLiteralFor(transpose->operand(0)) + .Transpose(transpose->dimensions()); return Status::OK(); } @@ -659,16 +654,16 @@ Status HloEvaluator::HandleConcatenate( ShapeUtil::GetDimension(operand_shape, concat_dim); } - auto result_literal = LiteralUtil::CreateFromDimensions( + auto result_literal = Literal::CreateFromDimensions( reference_shape.element_type(), concat_dimensions); DimensionVector source_indices(rank, 0); DimensionVector dest_indices(concat_dimensions.size(), 0); for (auto operand : operands) { const Shape& operand_shape = operand->shape(); - TF_RETURN_IF_ERROR(LiteralUtil::Copy( - GetEvaluatedLiteralFor(operand), source_indices, result_literal.get(), - dest_indices, AsInt64Slice(operand_shape.dimensions()))); + TF_RETURN_IF_ERROR(result_literal.get()->Copy( + GetEvaluatedLiteralFor(operand), source_indices, dest_indices, + AsInt64Slice(operand_shape.dimensions()))); dest_indices[concat_dim] += ShapeUtil::GetDimension(operand_shape, concat_dim); } @@ -793,14 +788,14 @@ Status HloEvaluator::HandleCompare(HloInstruction* compare, HloOpcode opcode, Status HloEvaluator::HandleSlice(HloInstruction* slice, HloInstruction* operand) { const Shape& shape = slice->shape(); - auto literal = LiteralUtil::CreateFromDimensions( + auto literal = Literal::CreateFromDimensions( shape.element_type(), AsInt64Slice(shape.dimensions())); DimensionVector dest_indices(slice->slice_starts().size(), 0); - TF_RETURN_IF_ERROR(LiteralUtil::Copy( - GetEvaluatedLiteralFor(operand), slice->slice_starts(), literal.get(), - dest_indices, AsInt64Slice(shape.dimensions()))); + TF_RETURN_IF_ERROR(literal.get()->Copy(GetEvaluatedLiteralFor(operand), + slice->slice_starts(), dest_indices, + AsInt64Slice(shape.dimensions()))); evaluated_[slice] = std::move(literal); return Status::OK(); diff --git a/tensorflow/compiler/xla/service/hlo_evaluator_test.cc b/tensorflow/compiler/xla/service/hlo_evaluator_test.cc index 03aa6f7f8c..0db05bf64c 100644 --- a/tensorflow/compiler/xla/service/hlo_evaluator_test.cc +++ b/tensorflow/compiler/xla/service/hlo_evaluator_test.cc @@ -44,9 +44,9 @@ class HloEvaluatorTest : public ::testing::Test { // Verifies that HloEvaluator evaluates a HLO instruction that performs clamp // with 3 operands. 
TEST_F(HloEvaluatorTest, DoesClamp) { - auto low = LiteralUtil::CreateR2<float>({{0.f, 2.f}, {2.f, 4.f}}); - auto high = LiteralUtil::CreateR2<float>({{2.f, 4.f}, {4.f, 4.f}}); - auto value = LiteralUtil::CreateR2<float>({{0.f, 5.f}, {0.f, 4.f}}); + auto low = Literal::CreateR2<float>({{0.f, 2.f}, {2.f, 4.f}}); + auto high = Literal::CreateR2<float>({{2.f, 4.f}, {4.f, 4.f}}); + auto value = Literal::CreateR2<float>({{0.f, 5.f}, {0.f, 4.f}}); Shape shape = low->shape(); auto c1 = HloInstruction::CreateConstant(std::move(low)); @@ -58,17 +58,17 @@ TEST_F(HloEvaluatorTest, DoesClamp) { std::unique_ptr<Literal> result = evaluator_->Evaluate(instruction.get(), {}).ConsumeValueOrDie(); - auto expected = LiteralUtil::CreateR2<float>({{0, 4}, {2, 4}}); + auto expected = Literal::CreateR2<float>({{0, 4}, {2, 4}}); - EXPECT_TRUE(LiteralUtil::Equal(*result, *expected)); + EXPECT_TRUE(result->Equal(*expected)); } // Verifies that HloEvaluator evaluates a HLO instruction that performs select // with 3 operands. TEST_F(HloEvaluatorTest, DoesSelect) { - auto pred = LiteralUtil::CreateR2<bool>({{true, false}, {false, true}}); - auto on_true = LiteralUtil::CreateR2<float>({{2.f, 4.f}, {4.f, 4.f}}); - auto on_false = LiteralUtil::CreateR2<float>({{0.f, 5.f}, {0.f, 4.f}}); + auto pred = Literal::CreateR2<bool>({{true, false}, {false, true}}); + auto on_true = Literal::CreateR2<float>({{2.f, 4.f}, {4.f, 4.f}}); + auto on_false = Literal::CreateR2<float>({{0.f, 5.f}, {0.f, 4.f}}); Shape shape = on_true->shape(); auto c1 = HloInstruction::CreateConstant(std::move(pred)); @@ -80,16 +80,16 @@ TEST_F(HloEvaluatorTest, DoesSelect) { std::unique_ptr<Literal> result = evaluator_->Evaluate(instruction.get(), {}).ConsumeValueOrDie(); - auto expected = LiteralUtil::CreateR2<float>({{2, 5}, {0, 4}}); + auto expected = Literal::CreateR2<float>({{2, 5}, {0, 4}}); - EXPECT_TRUE(LiteralUtil::Equal(*result, *expected)); + EXPECT_TRUE(result->Equal(*expected)); } // Verifies that HloEvaluator evaluates a HLO instruction that performs // element-wise addition with 2 operands. TEST_F(HloEvaluatorTest, DoesAdd) { - auto lhs = LiteralUtil::CreateR2<int64>({{1, 0}, {-100, 4}}); - auto rhs = LiteralUtil::CreateR2<int64>({{2, 4}, {4, 4}}); + auto lhs = Literal::CreateR2<int64>({{1, 0}, {-100, 4}}); + auto rhs = Literal::CreateR2<int64>({{2, 4}, {4, 4}}); Shape shape = ShapeUtil::MakeShape(S64, {2, 2}); auto c1 = HloInstruction::CreateConstant(std::move(lhs)); @@ -100,16 +100,16 @@ TEST_F(HloEvaluatorTest, DoesAdd) { std::unique_ptr<Literal> result = evaluator_->Evaluate(instruction.get(), {}).ConsumeValueOrDie(); - auto expected = LiteralUtil::CreateR2<int64>({{3, 4}, {-96, 8}}); + auto expected = Literal::CreateR2<int64>({{3, 4}, {-96, 8}}); - EXPECT_TRUE(LiteralUtil::Equal(*result, *expected)); + EXPECT_TRUE(result->Equal(*expected)); } // Verifies that HloEvaluator evaluates a HLO instruction that performs // element-wise divide with 2 operands. 
TEST_F(HloEvaluatorTest, DoesDivide) { - auto lhs_s64 = LiteralUtil::CreateR2<int64>({{1, 0}, {-100, 4}}); - auto rhs_s64 = LiteralUtil::CreateR2<int64>({{2, 4}, {4, 4}}); + auto lhs_s64 = Literal::CreateR2<int64>({{1, 0}, {-100, 4}}); + auto rhs_s64 = Literal::CreateR2<int64>({{2, 4}, {4, 4}}); Shape shape_s64 = ShapeUtil::MakeShape(S64, {2, 2}); auto c1_s64 = HloInstruction::CreateConstant(std::move(lhs_s64)); @@ -120,12 +120,12 @@ TEST_F(HloEvaluatorTest, DoesDivide) { std::unique_ptr<Literal> result = evaluator_->Evaluate(instruction.get(), {}).ConsumeValueOrDie(); - auto expected = LiteralUtil::CreateR2<int64>({{0, 0}, {-25, 1}}); + auto expected = Literal::CreateR2<int64>({{0, 0}, {-25, 1}}); - EXPECT_TRUE(LiteralUtil::Equal(*result, *expected)); + EXPECT_TRUE(result->Equal(*expected)); - auto lhs_f64 = LiteralUtil::CreateR2<double>({{1.0, 0.0}, {-100.0, 4.0}}); - auto rhs_f64 = LiteralUtil::CreateR2<double>({{2.2, 4.0}, {4.0, 4.0}}); + auto lhs_f64 = Literal::CreateR2<double>({{1.0, 0.0}, {-100.0, 4.0}}); + auto rhs_f64 = Literal::CreateR2<double>({{2.2, 4.0}, {4.0, 4.0}}); Shape shape_f64 = ShapeUtil::MakeShape(F64, {2, 2}); auto c1_f64 = HloInstruction::CreateConstant(std::move(lhs_f64)); @@ -135,16 +135,15 @@ TEST_F(HloEvaluatorTest, DoesDivide) { result = evaluator_->Evaluate(instruction.get(), {}).ConsumeValueOrDie(); - expected = - LiteralUtil::CreateR2<double>({{0.45454545454545453, 0}, {-25, 1}}); + expected = Literal::CreateR2<double>({{0.45454545454545453, 0}, {-25, 1}}); - EXPECT_TRUE(LiteralUtil::Equal(*result, *expected)); + EXPECT_TRUE(result->Equal(*expected)); } // Verifies that HloEvaluator evaluates a HLO instruction that performs // element-wise abs op with 1 operand. TEST_F(HloEvaluatorTest, DoesAbs) { - auto operand = LiteralUtil::CreateR2<int64>({{1, -20}, {-100, 4}}); + auto operand = Literal::CreateR2<int64>({{1, -20}, {-100, 4}}); const Shape& shape = ShapeUtil::MakeShape(S64, {2, 2}); auto c1 = HloInstruction::CreateConstant(std::move(operand)); auto instruction = @@ -153,31 +152,31 @@ TEST_F(HloEvaluatorTest, DoesAbs) { std::unique_ptr<Literal> result = evaluator_->Evaluate(instruction.get(), {}).ConsumeValueOrDie(); - auto expected = LiteralUtil::CreateR2<int64>({{1, 20}, {100, 4}}); + auto expected = Literal::CreateR2<int64>({{1, 20}, {100, 4}}); - EXPECT_TRUE(LiteralUtil::Equal(*result, *expected)); + EXPECT_TRUE(result->Equal(*expected)); // For R0 literal. const Shape& r0 = ShapeUtil::MakeShape(F32, {}); - operand = LiteralUtil::CreateR0<float>(-1.0f); + operand = Literal::CreateR0<float>(-1.0f); c1 = HloInstruction::CreateConstant(std::move(operand)); instruction = HloInstruction::CreateUnary(r0, HloOpcode::kAbs, c1.get()); result = evaluator_->Evaluate(instruction.get()).ConsumeValueOrDie(); - expected = LiteralUtil::CreateR0<float>(1.0f); + expected = Literal::CreateR0<float>(1.0f); - EXPECT_TRUE(LiteralUtil::Equal(*result, *expected)); + EXPECT_TRUE(result->Equal(*expected)); // For R1 literal with dimension of size 0. 
Shape empty_r1 = ShapeUtil::MakeShape(F32, {0}); - operand = LiteralUtil::CreateR1<float>({}); + operand = Literal::CreateR1<float>({}); c1 = HloInstruction::CreateConstant(std::move(operand)); instruction = HloInstruction::CreateUnary(empty_r1, HloOpcode::kAbs, c1.get()); result = evaluator_->Evaluate(instruction.get()).ConsumeValueOrDie(); - expected = LiteralUtil::CreateR1<float>({}); + expected = Literal::CreateR1<float>({}); - EXPECT_TRUE(LiteralUtil::Equal(*result, *expected)); + EXPECT_TRUE(result->Equal(*expected)); } // namespace // Verifies that HloEvaluator evaluates a HLO Computation with non-parameter nor @@ -186,9 +185,9 @@ TEST_F(HloEvaluatorTest, DoesTraveseInstructions) { HloComputation::Builder builder( ::testing::UnitTest::GetInstance()->current_test_info()->name()); - auto lhs = LiteralUtil::CreateR2<int64>({{1, 0}, {-100, 4}}); - auto rhs = LiteralUtil::CreateR2<int64>({{2, 4}, {4, 4}}); - auto rhs2 = LiteralUtil::CreateR2<int64>({{1, -20}, {-100, 4}}); + auto lhs = Literal::CreateR2<int64>({{1, 0}, {-100, 4}}); + auto rhs = Literal::CreateR2<int64>({{2, 4}, {4, 4}}); + auto rhs2 = Literal::CreateR2<int64>({{1, -20}, {-100, 4}}); std::vector<const Literal*> args = {lhs.get(), rhs.get(), rhs2.get()}; Shape shape = ShapeUtil::MakeShape(S64, {2, 2}); @@ -206,9 +205,9 @@ TEST_F(HloEvaluatorTest, DoesTraveseInstructions) { std::unique_ptr<Literal> result = evaluator_->Evaluate(builder.Build().get(), args).ConsumeValueOrDie(); - auto expected = LiteralUtil::CreateR2<int64>({{4, -16}, {-196, 12}}); + auto expected = Literal::CreateR2<int64>({{4, -16}, {-196, 12}}); - EXPECT_TRUE(LiteralUtil::Equal(*result, *expected)); + EXPECT_TRUE(result->Equal(*expected)); } // Verifies Reshape operation is correctly evaluated. @@ -220,7 +219,7 @@ TEST_F(HloEvaluatorTest, DoesReshape) { TF_ASSIGN_OR_ASSERT_OK(auto literal, LiteralTestUtil::CreateRandomLiteral<F32>( ShapeUtil::MakeShape(F32, dimensions), 0.0, 1.0)); - auto literal_clone = LiteralUtil::CloneToUnique(*literal); + auto literal_clone = literal->CloneToUnique(); HloInstruction* literal_instruction = builder.AddInstruction( HloInstruction::CreateConstant(std::move(literal))); @@ -233,11 +232,10 @@ TEST_F(HloEvaluatorTest, DoesReshape) { evaluator_->Evaluate(builder.Build().get(), {}).ConsumeValueOrDie(); using NativeT = typename primitive_util::PrimitiveTypeToNative<F32>::type; - LiteralUtil::EachCell<NativeT>( - *result, [&](tensorflow::gtl::ArraySlice<int64> indices, NativeT value) { + result->EachCell<NativeT>( + [&](tensorflow::gtl::ArraySlice<int64> indices, NativeT value) { std::vector<int64> rindexes = Permute(permutation, indices); - EXPECT_TRUE(value == - LiteralUtil::Get<NativeT>(*literal_clone, rindexes)); + EXPECT_TRUE(value == literal_clone->Get<NativeT>(rindexes)); }); } @@ -246,8 +244,8 @@ TEST_F(HloEvaluatorTest, DoesBroadcast) { HloComputation::Builder builder( ::testing::UnitTest::GetInstance()->current_test_info()->name()); - auto input_literal = LiteralUtil::CreateR2<int32>({{1, 2}, {3, 4}, {5, 6}}); - auto output_literal = LiteralUtil::CreateR3<int32>( + auto input_literal = Literal::CreateR2<int32>({{1, 2}, {3, 4}, {5, 6}}); + auto output_literal = Literal::CreateR3<int32>( {{{1, 2}, {3, 4}, {5, 6}}, {{1, 2}, {3, 4}, {5, 6}}}); HloInstruction* literal_instruction = builder.AddInstruction( HloInstruction::CreateConstant(std::move(input_literal))); @@ -258,9 +256,9 @@ TEST_F(HloEvaluatorTest, DoesBroadcast) { std::unique_ptr<Literal> result = evaluator_->Evaluate(builder.Build().get(), {}).ConsumeValueOrDie(); 
- LiteralUtil::EachCell<int32>( - *result, [&](tensorflow::gtl::ArraySlice<int64> indices, int32 value) { - EXPECT_TRUE(value == LiteralUtil::Get<int32>(*output_literal, indices)); + result->EachCell<int32>( + [&](tensorflow::gtl::ArraySlice<int64> indices, int32 value) { + EXPECT_TRUE(value == output_literal->Get<int32>(indices)); }); } diff --git a/tensorflow/compiler/xla/service/hlo_graph_dumper.cc b/tensorflow/compiler/xla/service/hlo_graph_dumper.cc index 9fe4d85f8b..dffb53320c 100644 --- a/tensorflow/compiler/xla/service/hlo_graph_dumper.cc +++ b/tensorflow/compiler/xla/service/hlo_graph_dumper.cc @@ -335,8 +335,7 @@ string InstructionSequenceGraph( ShapeUtil::IsEffectiveScalar(instruction->shape())) { auto elem_idx = IndexUtil::LinearIndexToMultidimensionalIndex( instruction->shape(), /*linear_index=*/0); - StrAppend(&label, " = {", - LiteralUtil::GetAsString(instruction->literal(), elem_idx), + StrAppend(&label, " = {", instruction->literal().GetAsString(elem_idx), "}"); } diff --git a/tensorflow/compiler/xla/service/hlo_instruction.cc b/tensorflow/compiler/xla/service/hlo_instruction.cc index 33bb29e16d..fe6233205c 100644 --- a/tensorflow/compiler/xla/service/hlo_instruction.cc +++ b/tensorflow/compiler/xla/service/hlo_instruction.cc @@ -870,7 +870,7 @@ std::unique_ptr<HloInstruction> HloInstruction::CloneWithNewOperands( return CreateWhile(shape, while_condition(), while_body(), new_operands[0]); case HloOpcode::kConstant: - return CreateConstant(LiteralUtil::CloneToUnique(*literal_)); + return CreateConstant(literal_->CloneToUnique()); case HloOpcode::kFusion: return CloneFusionWithNewOperands(shape, new_operands); case HloOpcode::kParameter: @@ -1181,7 +1181,7 @@ bool HloInstruction::Identical( // A constant is defined by the value in the literal. case HloOpcode::kConstant: - return LiteralUtil::Equal(literal(), other.literal()); + return literal().Equal(other.literal()); // A convert result is determined by the primitive type that the operand is // converted into. @@ -1482,9 +1482,9 @@ string HloInstruction::ToString(bool compact_operands, if (opcode() == HloOpcode::kConstant) { // For constants, show the actual value in place of an empty operand list. if (ShapeUtil::ElementsIn(shape()) <= 10) { - // LiteralUtil::ToString emits multidimensional arrays over multiple + // Literal::ToString emits multidimensional arrays over multiple // lines. Compact this into one line by stripping out white space. 
- string tmp = LiteralUtil::ToString(literal()); + string tmp = literal().ToString(); std::replace(tmp.begin(), tmp.end(), '\n', ' '); std::vector<string> v = tensorflow::str_util::Split(tmp, ' '); bool first = true; diff --git a/tensorflow/compiler/xla/service/hlo_instruction_test.cc b/tensorflow/compiler/xla/service/hlo_instruction_test.cc index bcf81cd8dd..bb1b477e13 100644 --- a/tensorflow/compiler/xla/service/hlo_instruction_test.cc +++ b/tensorflow/compiler/xla/service/hlo_instruction_test.cc @@ -232,7 +232,7 @@ TEST_F(HloInstructionTest, MultipleUsersAndOperands) { // ------- auto param0 = HloInstruction::CreateParameter(0, r0f32_, "param0"); auto param1 = HloInstruction::CreateParameter(1, r0f32_, "param1"); - auto c0 = HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)); + auto c0 = HloInstruction::CreateConstant(Literal::CreateR0<float>(1.1f)); auto addleft = HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, param0.get(), c0.get()); auto addright = HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, @@ -271,7 +271,7 @@ TEST_F(HloInstructionTest, MultipleUsersAndOperandsWithUnaryOps) { // ------- auto param0 = HloInstruction::CreateParameter(0, r0f32_, "param0"); auto param1 = HloInstruction::CreateParameter(1, r0f32_, "param1"); - auto c0 = HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)); + auto c0 = HloInstruction::CreateConstant(Literal::CreateR0<float>(1.1f)); auto neg1 = HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, c0.get()); auto addleft = HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, param0.get(), neg1.get()); @@ -307,7 +307,7 @@ TEST_F(HloInstructionTest, TrivialMap) { auto param = builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32, "x")); auto value = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); builder.AddInstruction( HloInstruction::CreateBinary(r0f32, HloOpcode::kAdd, param, value)); auto add_f32 = builder.Build(); @@ -349,9 +349,8 @@ TEST_F(HloInstructionTest, TrivialReduce) { // Builds a parameter and an initial value and feeds them to the reduce. auto param0 = HloInstruction::CreateParameter(0, f32a100x10, ""); - auto const0 = - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)); - auto c0 = HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)); + auto const0 = HloInstruction::CreateConstant(Literal::CreateR0<float>(0.0f)); + auto c0 = HloInstruction::CreateConstant(Literal::CreateR0<float>(1.1f)); auto reduce = HloInstruction::CreateReduce(f32v100, param0.get(), const0.get(), /*dimensions_to_reduce=*/{1}, add_f32.get()); @@ -560,7 +559,7 @@ TEST_F(HloInstructionTest, PostProcessAllVisitedNodes) { TEST_F(HloInstructionTest, SingletonFusionOp) { // Create a fusion instruction containing a single unary operation. auto constant = - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.1f)); auto exp = HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, constant.get()); @@ -574,9 +573,9 @@ TEST_F(HloInstructionTest, SingletonFusionOp) { TEST_F(HloInstructionTest, BinaryFusionOp) { // Create a fusion instruction containing a single binary operation. 
auto constant1 = - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.1f)); auto constant2 = - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.1f)); + HloInstruction::CreateConstant(Literal::CreateR0<float>(42.1f)); auto add = HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, constant1.get(), constant2.get()); @@ -594,7 +593,7 @@ TEST_F(HloInstructionTest, BinaryFusionOp) { TEST_F(HloInstructionTest, ChainFusionOp) { // Create a chain of fused unary ops. auto constant = - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.1f)); auto exp1 = HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, constant.get()); auto exp2 = HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, exp1.get()); @@ -613,7 +612,7 @@ TEST_F(HloInstructionTest, ChainFusionOp) { TEST_F(HloInstructionTest, PreserveMetadataInFusionAndClone) { // Create a chain of fused unary ops. auto constant = - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.1f)); auto exp1 = HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, constant.get()); auto exp2 = HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, exp1.get()); @@ -644,7 +643,7 @@ TEST_F(HloInstructionTest, FusionOpWithCalledComputations) { std::unique_ptr<HloComputation> computation_y = make_map_computation(); auto constant = - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.1f)); auto map_1_x = HloInstruction::CreateMap(scalar_shape, {constant.get()}, computation_x.get(), /*static_operands=*/{}); @@ -681,9 +680,9 @@ TEST_F(HloInstructionTest, ComplexFusionOp) { // // Notable complexities are repeated operands in a same instruction, different // shapes, use of value in different expressions. - auto c1 = HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)); - auto c2 = HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.1f)); - auto c3 = HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(9.0f)); + auto c1 = HloInstruction::CreateConstant(Literal::CreateR0<float>(1.1f)); + auto c2 = HloInstruction::CreateConstant(Literal::CreateR0<float>(2.1f)); + auto c3 = HloInstruction::CreateConstant(Literal::CreateR0<float>(9.0f)); auto add = HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, c1.get(), c2.get()); @@ -732,11 +731,11 @@ TEST_F(HloInstructionTest, IdenticalInstructions) { // Create a set of random constant operands to use below. Make them matrices // so dimensions are interesting. auto operand1 = HloInstruction::CreateConstant( - LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})); + Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})); auto operand2 = HloInstruction::CreateConstant( - LiteralUtil::CreateR2<float>({{10.0, 20.0}, {30.0, 40.0}})); - auto vector_operand = HloInstruction::CreateConstant( - LiteralUtil::CreateR1<float>({42.0, 123.0})); + Literal::CreateR2<float>({{10.0, 20.0}, {30.0, 40.0}})); + auto vector_operand = + HloInstruction::CreateConstant(Literal::CreateR1<float>({42.0, 123.0})); Shape shape = operand1->shape(); // Convenient short names for the operands. 
diff --git a/tensorflow/compiler/xla/service/hlo_module_test.cc b/tensorflow/compiler/xla/service/hlo_module_test.cc index 870bc729ae..58173bca07 100644 --- a/tensorflow/compiler/xla/service/hlo_module_test.cc +++ b/tensorflow/compiler/xla/service/hlo_module_test.cc @@ -38,7 +38,7 @@ class HloModuleTest : public HloTestBase { std::unique_ptr<HloComputation> CreateConstantComputation() { auto builder = HloComputation::Builder("Constant"); builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f))); return builder.Build(); } diff --git a/tensorflow/compiler/xla/service/hlo_ordering_test.cc b/tensorflow/compiler/xla/service/hlo_ordering_test.cc index 21d852a51d..d36784e67d 100644 --- a/tensorflow/compiler/xla/service/hlo_ordering_test.cc +++ b/tensorflow/compiler/xla/service/hlo_ordering_test.cc @@ -101,7 +101,7 @@ TEST_F(HloOrderingTest, InstructionsInDifferentComputations) { auto builder_c = HloComputation::Builder("C"); HloInstruction* c = builder_c.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f))); HloComputation* computation_c = module->AddEmbeddedComputation(builder_c.Build()); diff --git a/tensorflow/compiler/xla/service/hlo_query.cc b/tensorflow/compiler/xla/service/hlo_query.cc index a153d73dbd..d45038f1f4 100644 --- a/tensorflow/compiler/xla/service/hlo_query.cc +++ b/tensorflow/compiler/xla/service/hlo_query.cc @@ -25,7 +25,7 @@ namespace hlo_query { bool IsConstantR0F32(HloInstruction* instruction, float* out) { if (instruction->opcode() == HloOpcode::kConstant && ShapeUtil::IsScalarF32(instruction->shape())) { - *out = LiteralUtil::Get<float>(instruction->literal(), {}); + *out = instruction->literal().Get<float>({}); return true; } diff --git a/tensorflow/compiler/xla/service/hlo_rematerialization_test.cc b/tensorflow/compiler/xla/service/hlo_rematerialization_test.cc index d9c2a5f0ac..8a1e705711 100644 --- a/tensorflow/compiler/xla/service/hlo_rematerialization_test.cc +++ b/tensorflow/compiler/xla/service/hlo_rematerialization_test.cc @@ -122,7 +122,7 @@ class HloRematerializationTest : public HloTestBase { builder.AddInstruction( HloInstruction::CreateParameter(0, vec1_shape_, "param")); builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true))); + HloInstruction::CreateConstant(Literal::CreateR0<bool>(true))); return builder.Build(); } @@ -211,7 +211,7 @@ TEST_F(HloRematerializationTest, RematerializeAroundWhile) { cond_builder.AddInstruction( HloInstruction::CreateParameter(0, vec1_shape_, "param")); cond_builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true))); + HloInstruction::CreateConstant(Literal::CreateR0<bool>(true))); HloComputation* while_cond = module->AddEmbeddedComputation(cond_builder.Build()); @@ -250,7 +250,7 @@ TEST_F(HloRematerializationTest, RematerializeEntryAndWhileBody) { cond_builder.AddInstruction( HloInstruction::CreateParameter(0, vec1_shape_, "param")); cond_builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true))); + HloInstruction::CreateConstant(Literal::CreateR0<bool>(true))); HloComputation* while_cond = module->AddEmbeddedComputation(cond_builder.Build()); @@ -285,7 +285,7 @@ TEST_F(HloRematerializationTest, RematerializeNestedComputations) { cond_builder.AddInstruction( HloInstruction::CreateParameter(0, vec1_shape_, "param")); 
cond_builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true))); + HloInstruction::CreateConstant(Literal::CreateR0<bool>(true))); HloComputation* while_cond = module->AddEmbeddedComputation(cond_builder.Build()); diff --git a/tensorflow/compiler/xla/service/hlo_subcomputation_unification_test.cc b/tensorflow/compiler/xla/service/hlo_subcomputation_unification_test.cc index 867ebc7f61..c98856b192 100644 --- a/tensorflow/compiler/xla/service/hlo_subcomputation_unification_test.cc +++ b/tensorflow/compiler/xla/service/hlo_subcomputation_unification_test.cc @@ -75,7 +75,7 @@ TEST_F(HloSubcomputationUnificationTest, UnifyIdentities) { module->AddEmbeddedComputation(CreateR0S32IdentityComputation()); auto constant = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(5))); + HloInstruction::CreateConstant(Literal::CreateR0<int32>(5))); auto x = builder.AddInstruction( HloInstruction::CreateCall(r0s32_, {constant}, callee1)); auto y = builder.AddInstruction( @@ -110,9 +110,9 @@ TEST_F(HloSubcomputationUnificationTest, UnifyAdditions) { module->AddEmbeddedComputation(CreateR0S32AdditionComputation()); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(5))); + HloInstruction::CreateConstant(Literal::CreateR0<int32>(5))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(3))); + HloInstruction::CreateConstant(Literal::CreateR0<int32>(3))); auto x = builder.AddInstruction( HloInstruction::CreateCall(r0s32_, {constant1, constant2}, callee1)); auto y = builder.AddInstruction( diff --git a/tensorflow/compiler/xla/service/hlo_tfgraph_builder.cc b/tensorflow/compiler/xla/service/hlo_tfgraph_builder.cc index 6707b02c5c..76177462aa 100644 --- a/tensorflow/compiler/xla/service/hlo_tfgraph_builder.cc +++ b/tensorflow/compiler/xla/service/hlo_tfgraph_builder.cc @@ -171,8 +171,7 @@ void HloTfGraphBuilder::SetNodeAttrs(const HloInstruction* instruction, break; case HloOpcode::kConstant: if (ShapeUtil::IsScalar(instruction->shape())) { - attrs["value"].set_s( - LiteralUtil::GetAsString(instruction->literal(), {})); + attrs["value"].set_s(instruction->literal().GetAsString({})); } break; case HloOpcode::kCustomCall: diff --git a/tensorflow/compiler/xla/service/hlo_tfgraph_builder_test.cc b/tensorflow/compiler/xla/service/hlo_tfgraph_builder_test.cc index c2718ea800..8e9d93e367 100644 --- a/tensorflow/compiler/xla/service/hlo_tfgraph_builder_test.cc +++ b/tensorflow/compiler/xla/service/hlo_tfgraph_builder_test.cc @@ -91,7 +91,7 @@ TEST_F(HloTfGraphBuilderTest, CheckConcatenateDimsAndShapes) { TEST_F(HloTfGraphBuilderTest, CheckScalarValue) { auto builder = HloComputation::Builder("Const"); HloInstruction *instruction = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0(123))); + HloInstruction::CreateConstant(Literal::CreateR0(123))); OpMetadata metadata; metadata.set_op_name("x"); metadata.set_op_type("y"); diff --git a/tensorflow/compiler/xla/service/inliner_test.cc b/tensorflow/compiler/xla/service/inliner_test.cc index 2887a8a0a0..84bfbb30c3 100644 --- a/tensorflow/compiler/xla/service/inliner_test.cc +++ b/tensorflow/compiler/xla/service/inliner_test.cc @@ -51,10 +51,10 @@ TEST_F(InlinerTest, MapMax) { auto max_f32 = max_builder.Build(); auto builder = HloComputation::Builder("MapMaxFunction"); - auto lhs = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR1<float>({1, 2, 3, 4}))); - 
auto rhs = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR1<float>({4, 3, 2, 1}))); + auto lhs = builder.AddInstruction( + HloInstruction::CreateConstant(Literal::CreateR1<float>({1, 2, 3, 4}))); + auto rhs = builder.AddInstruction( + HloInstruction::CreateConstant(Literal::CreateR1<float>({4, 3, 2, 1}))); builder.AddInstruction( HloInstruction::CreateMap(lhs->shape(), {lhs, rhs}, max_f32.get())); @@ -70,7 +70,7 @@ TEST_F(InlinerTest, MapMax) { // Verify execution on CPU. auto result = ExecuteAndTransfer(std::move(hlo_module), {}); - auto expected = LiteralUtil::CreateR1<float>({4, 3, 3, 4}); + auto expected = Literal::CreateR1<float>({4, 3, 3, 4}); LiteralTestUtil::ExpectEqual(*result, *expected); } @@ -83,12 +83,12 @@ TEST_F(InlinerTest, MapConstant) { HloInstruction::CreateParameter(0, r0f32, "x")); (void)param1; const2_builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0f))); auto const2_f32 = const2_builder.Build(); auto builder = HloComputation::Builder("MapConstFunction"); auto lhs = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR2<float>({{1, 2, 3, 4}, {5, 6, 7, 8}}))); + Literal::CreateR2<float>({{1, 2, 3, 4}, {5, 6, 7, 8}}))); builder.AddInstruction( HloInstruction::CreateMap(lhs->shape(), {lhs}, const2_f32.get())); @@ -104,7 +104,7 @@ TEST_F(InlinerTest, MapConstant) { // Verify execution on CPU. auto result = ExecuteAndTransfer(std::move(hlo_module), {}); - auto expected = LiteralUtil::CreateR2<float>({{2, 2, 2, 2}, {2, 2, 2, 2}}); + auto expected = Literal::CreateR2<float>({{2, 2, 2, 2}, {2, 2, 2, 2}}); LiteralTestUtil::ExpectEqual(*result, *expected); } diff --git a/tensorflow/compiler/xla/service/instruction_fusion_test.cc b/tensorflow/compiler/xla/service/instruction_fusion_test.cc index a2e6c2ae00..b3e0007dcc 100644 --- a/tensorflow/compiler/xla/service/instruction_fusion_test.cc +++ b/tensorflow/compiler/xla/service/instruction_fusion_test.cc @@ -28,7 +28,7 @@ TEST_F(InstructionFusionTest, CostlyProducerAndOperandElementReusingConsumerNotFused) { HloComputation::Builder builder(TestName()); HloInstruction* const0 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0(5))); + HloInstruction::CreateConstant(Literal::CreateR0(5))); HloInstruction* exp1 = builder.AddInstruction(HloInstruction::CreateUnary( ShapeUtil::MakeShape(S32, {}), HloOpcode::kExp, const0)); HloInstruction* broadcast2 = @@ -49,7 +49,7 @@ TEST_F(InstructionFusionTest, NonCostlyProducerAndOperandElementReusingConsumerFused) { HloComputation::Builder builder(TestName()); HloInstruction* const0 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0(5))); + HloInstruction::CreateConstant(Literal::CreateR0(5))); HloInstruction* negate1 = builder.AddInstruction(HloInstruction::CreateUnary( ShapeUtil::MakeShape(S32, {}), HloOpcode::kNegate, const0)); HloInstruction* broadcast2 = @@ -70,7 +70,7 @@ TEST_F(InstructionFusionTest, CostlyProducerAndNonOperandElementReusingConsumerFused_Reshape) { HloComputation::Builder builder(TestName()); HloInstruction* const0 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0(5))); + HloInstruction::CreateConstant(Literal::CreateR0(5))); HloInstruction* exp1 = builder.AddInstruction(HloInstruction::CreateUnary( ShapeUtil::MakeShape(S32, {}), HloOpcode::kExp, const0)); HloInstruction* reshape2 = builder.AddInstruction( @@ -90,7 +90,7 @@ 
TEST_F(InstructionFusionTest, CostlyProducerAndNonOperandElementReusingConsumerFused_Transpose) { HloComputation::Builder builder(TestName()); HloInstruction* const0 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0(5))); + HloInstruction::CreateConstant(Literal::CreateR0(5))); HloInstruction* exp1 = builder.AddInstruction(HloInstruction::CreateUnary( ShapeUtil::MakeShape(S32, {}), HloOpcode::kExp, const0)); HloInstruction* transpose2 = builder.AddInstruction( diff --git a/tensorflow/compiler/xla/service/layout_assignment_test.cc b/tensorflow/compiler/xla/service/layout_assignment_test.cc index 6d818cdea0..a83466d807 100644 --- a/tensorflow/compiler/xla/service/layout_assignment_test.cc +++ b/tensorflow/compiler/xla/service/layout_assignment_test.cc @@ -230,7 +230,7 @@ TEST_F(LayoutAssignmentTest, TupleSelect) { HloInstruction::CreateTuple({constant0, constant1})); auto pred = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true))); + HloInstruction::CreateConstant(Literal::CreateR0<bool>(true))); auto select = builder.AddInstruction(HloInstruction::CreateTernary( tuple0->shape(), HloOpcode::kSelect, pred, tuple0, tuple1)); @@ -264,7 +264,7 @@ TEST_F(LayoutAssignmentTest, ConflictingLayoutTuple) { // tuple and assigning the layouts of the copied arrays as needed. auto builder = HloComputation::Builder(TestName()); auto constant = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}))); + Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}))); auto inner_tuple = builder.AddInstruction(HloInstruction::CreateTuple({constant})); auto nested_tuple = builder.AddInstruction( diff --git a/tensorflow/compiler/xla/service/liveness_util_test.cc b/tensorflow/compiler/xla/service/liveness_util_test.cc index bad4be149a..14d831f289 100644 --- a/tensorflow/compiler/xla/service/liveness_util_test.cc +++ b/tensorflow/compiler/xla/service/liveness_util_test.cc @@ -85,9 +85,9 @@ TEST_F(DoesNotUseOperandBufferTest, FusedDynamicUpdateSlice) { // Create a DynamicUpdateSlice instruction of tuple element 1. auto starts = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR1<int32>({2}))); + HloInstruction::CreateConstant(Literal::CreateR1<int32>({2}))); auto update = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR1<float>({2.f, 2.f, 2.f}))); + Literal::CreateR1<float>({2.f, 2.f, 2.f}))); auto dynamic_update_slice = builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice( data_shape, gte1, update, starts)); @@ -180,9 +180,9 @@ TEST_F(CanShareOperandBufferWithUserTest, FusedDynamicUpdateSlice) { // Create a DynamicUpdateSlice instruction of tuple element 1. 
auto starts = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR1<int32>({2}))); + HloInstruction::CreateConstant(Literal::CreateR1<int32>({2}))); auto update = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR1<float>({2.f, 2.f, 2.f}))); + Literal::CreateR1<float>({2.f, 2.f, 2.f}))); auto dynamic_update_slice = builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice( data_shape, gte1, update, starts)); @@ -234,15 +234,15 @@ TEST_F(CanShareOperandBufferWithUserTest, FusedDotAdd) { Shape data_shape = ShapeUtil::MakeShape(F32, {2, 2}); auto a = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR2<float>({{1.0, 0.0}, {0.0, 1.0}}))); + Literal::CreateR2<float>({{1.0, 0.0}, {0.0, 1.0}}))); auto b = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR2<float>({{2.0, 2.0}, {2.0, 2.0}}))); + Literal::CreateR2<float>({{2.0, 2.0}, {2.0, 2.0}}))); auto dot = builder.AddInstruction( HloInstruction::CreateBinary(data_shape, HloOpcode::kDot, a, b)); auto one = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto add_operand = builder.AddInstruction( HloInstruction::CreateBroadcast(data_shape, one, {1})); @@ -264,9 +264,9 @@ TEST_F(CanShareOperandBufferWithUserTest, FusedTransposeDotAdd) { Shape data_shape = ShapeUtil::MakeShape(F32, {2, 2}); auto a = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR2<float>({{1.0, 0.0}, {0.0, 1.0}}))); + Literal::CreateR2<float>({{1.0, 0.0}, {0.0, 1.0}}))); auto b = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR2<float>({{2.0, 2.0}, {2.0, 2.0}}))); + Literal::CreateR2<float>({{2.0, 2.0}, {2.0, 2.0}}))); auto b_t = builder.AddInstruction( HloInstruction::CreateTranspose(data_shape, b, {1, 0})); @@ -274,7 +274,7 @@ TEST_F(CanShareOperandBufferWithUserTest, FusedTransposeDotAdd) { HloInstruction::CreateBinary(data_shape, HloOpcode::kDot, a, b_t)); auto one = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto add_operand = builder.AddInstruction( HloInstruction::CreateBroadcast(data_shape, one, {1})); @@ -300,7 +300,7 @@ TEST_F(CanShareOperandBufferWithUserTest, OutputFusionCantAliasOperandBuffer) { Shape data_shape = ShapeUtil::MakeShape(F32, {2, 2}); auto one = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto operand = builder.AddInstruction( HloInstruction::CreateBroadcast(data_shape, one, {1})); @@ -308,7 +308,7 @@ TEST_F(CanShareOperandBufferWithUserTest, OutputFusionCantAliasOperandBuffer) { HloInstruction::CreateReverse(data_shape, operand, {0, 1})); auto two = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR2<float>({{2.0, 2.0}, {2.0, 2.0}}))); + Literal::CreateR2<float>({{2.0, 2.0}, {2.0, 2.0}}))); auto add = builder.AddInstruction( HloInstruction::CreateBinary(data_shape, HloOpcode::kAdd, reverse, two)); diff --git a/tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc b/tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc index 99ad107a90..d9e233af2a 100644 --- a/tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc +++ b/tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc @@ -163,36 +163,36 @@ llvm::Constant* 
LiteralToConstant(const Literal& literal, int64 dimension_index, llvm::Constant* value; switch (shape.element_type()) { case PRED: - value = llvm::ConstantInt::get( - ir_element_type, LiteralUtil::Get<bool>(literal, *multi_index)); + value = llvm::ConstantInt::get(ir_element_type, + literal.Get<bool>(*multi_index)); break; case U8: - value = llvm::ConstantInt::get( - ir_element_type, LiteralUtil::Get<uint8>(literal, *multi_index)); + value = llvm::ConstantInt::get(ir_element_type, + literal.Get<uint8>(*multi_index)); break; case S32: - value = llvm::ConstantInt::get( - ir_element_type, LiteralUtil::Get<int32>(literal, *multi_index)); + value = llvm::ConstantInt::get(ir_element_type, + literal.Get<int32>(*multi_index)); break; case U32: - value = llvm::ConstantInt::get( - ir_element_type, LiteralUtil::Get<uint32>(literal, *multi_index)); + value = llvm::ConstantInt::get(ir_element_type, + literal.Get<uint32>(*multi_index)); break; case S64: - value = llvm::ConstantInt::get( - ir_element_type, LiteralUtil::Get<int64>(literal, *multi_index)); + value = llvm::ConstantInt::get(ir_element_type, + literal.Get<int64>(*multi_index)); break; case U64: - value = llvm::ConstantInt::get( - ir_element_type, LiteralUtil::Get<uint64>(literal, *multi_index)); + value = llvm::ConstantInt::get(ir_element_type, + literal.Get<uint64>(*multi_index)); break; case F32: - value = llvm::ConstantFP::get( - ir_element_type, LiteralUtil::Get<float>(literal, *multi_index)); + value = llvm::ConstantFP::get(ir_element_type, + literal.Get<float>(*multi_index)); break; case F64: - value = llvm::ConstantFP::get( - ir_element_type, LiteralUtil::Get<double>(literal, *multi_index)); + value = llvm::ConstantFP::get(ir_element_type, + literal.Get<double>(*multi_index)); break; default: LOG(FATAL) << "unsupported type " << shape.element_type(); diff --git a/tensorflow/compiler/xla/service/reshape_mover_test.cc b/tensorflow/compiler/xla/service/reshape_mover_test.cc index 9becdb2bed..49c1755520 100644 --- a/tensorflow/compiler/xla/service/reshape_mover_test.cc +++ b/tensorflow/compiler/xla/service/reshape_mover_test.cc @@ -84,7 +84,7 @@ TEST_F(ReshapeMoverTest, 1ConstantAnd1ReshapesOnRngNotMoved) { builder.AddInstruction(HloInstruction::CreateReshape(root_shape, rng0)); auto const1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateFromShape(root_shape))); + HloInstruction::CreateConstant(Literal::CreateFromShape(root_shape))); builder.AddInstruction(HloInstruction::CreateBinary( root_shape, HloOpcode::kAdd, reshape0, const1)); @@ -179,9 +179,8 @@ TEST_F(ReshapeMoverTest, EquivalentReshapesMoved) { TEST_F(ReshapeMoverTest, 1ConstantAnd2ReshapesMoved) { HloComputation::Builder builder(TestName()); auto root_shape = ShapeUtil::MakeShape(F32, {2, 3}); - auto const0 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR2<bool>( - {{true, true, false}, {false, false, true}}))); + auto const0 = builder.AddInstruction(HloInstruction::CreateConstant( + Literal::CreateR2<bool>({{true, true, false}, {false, false, true}}))); auto param1 = builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeShape(F32, {1, 3, 1, 2}), "param1")); @@ -263,12 +262,12 @@ TEST_F(ReshapeMoverTest, 2TrivialConstantReshapeNotMoved) { HloComputation::Builder builder(TestName()); auto root_shape = ShapeUtil::MakeShape(F32, {2, 3}); auto const0 = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR2<float>({{1, 2, 3}, {4, 5, 6}}))); + Literal::CreateR2<float>({{1, 2, 3}, 
{4, 5, 6}}))); auto reshape0 = builder.AddInstruction(HloInstruction::CreateReshape(root_shape, const0)); auto const1 = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR2<float>({{1, 2, 3}, {4, 5, 6}}))); + Literal::CreateR2<float>({{1, 2, 3}, {4, 5, 6}}))); auto reshape1 = builder.AddInstruction(HloInstruction::CreateReshape(root_shape, const1)); @@ -318,7 +317,7 @@ TEST_F(ReshapeMoverTest, 1NonTrivialReshapeMoved) { auto param0 = builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeShape(F32, {1, 3, 1, 2}), "param0")); auto const1 = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR2<float>({{1, 2, 3}, {4, 5, 6}}))); + Literal::CreateR2<float>({{1, 2, 3}, {4, 5, 6}}))); auto reshape0 = builder.AddInstruction(HloInstruction::CreateReshape(root_shape, param0)); builder.AddInstruction(HloInstruction::CreateBinary( @@ -464,7 +463,7 @@ TEST_F(ReshapeMoverTest, ImplicitlyBroadcastReshapeIsNotMovedBug37787999) { auto reshape = builder.AddInstruction(HloInstruction::CreateReshape( ShapeUtil::MakeShape(F32, {128, 1}), param0)); Array2D<float> a(128, 1024); - auto literal = LiteralUtil::CreateR2FromArray2D<float>(a); + auto literal = Literal::CreateR2FromArray2D<float>(a); auto constant = builder.AddInstruction( HloInstruction::CreateConstant(std::move(literal))); auto multiply = builder.AddInstruction(HloInstruction::CreateBinary( diff --git a/tensorflow/compiler/xla/service/transfer_manager_test.cc b/tensorflow/compiler/xla/service/transfer_manager_test.cc index ca38601d91..29ecef9510 100644 --- a/tensorflow/compiler/xla/service/transfer_manager_test.cc +++ b/tensorflow/compiler/xla/service/transfer_manager_test.cc @@ -55,7 +55,7 @@ class CpuTransferManagerTest : public ::testing::Test { TEST_F(CpuTransferManagerTest, TransferR0U32ToDevice) { std::vector<uint8> storage(sizeof(uint32), '\x00'); se::DeviceMemoryBase memptr(storage.data(), storage.size()); - std::unique_ptr<Literal> literal = LiteralUtil::CreateR0<uint32>(42); + std::unique_ptr<Literal> literal = Literal::CreateR0<uint32>(42); TF_CHECK_OK(transfer_manager_.TransferLiteralToDevice(stream_exec_, *literal, &memptr)); @@ -66,7 +66,7 @@ TEST_F(CpuTransferManagerTest, TransferR1F32ToDevice) { std::vector<uint8> storage(4 * sizeof(float), '\x00'); se::DeviceMemoryBase memptr(storage.data(), storage.size()); std::unique_ptr<Literal> literal = - LiteralUtil::CreateR1<float>({1.25f, 2.5f, -17.0f, -20.125f}); + Literal::CreateR1<float>({1.25f, 2.5f, -17.0f, -20.125f}); TF_CHECK_OK(transfer_manager_.TransferLiteralToDevice(stream_exec_, *literal, &memptr)); @@ -80,7 +80,7 @@ TEST_F(CpuTransferManagerTest, TransferR1U8ToDevice) { std::vector<uint8> storage(16, '\x00'); se::DeviceMemoryBase memptr(storage.data(), storage.size()); const char* str = "0123456789abcdef"; - std::unique_ptr<Literal> literal = LiteralUtil::CreateR1U8(str); + std::unique_ptr<Literal> literal = Literal::CreateR1U8(str); TF_CHECK_OK(transfer_manager_.TransferLiteralToDevice(stream_exec_, *literal, &memptr)); diff --git a/tensorflow/compiler/xla/service/transpose_folding_test.cc b/tensorflow/compiler/xla/service/transpose_folding_test.cc index c72d127ea8..9520c42d28 100644 --- a/tensorflow/compiler/xla/service/transpose_folding_test.cc +++ b/tensorflow/compiler/xla/service/transpose_folding_test.cc @@ -92,11 +92,11 @@ TEST_F(TransposeFoldingTest, FoldDotTransposeConstant) { auto builder = HloComputation::Builder("entry_computation"); // 2x1 HloInstruction* const0 = builder.AddInstruction( - 
HloInstruction::CreateConstant(LiteralUtil::CreateR2<float>({{1}, {2}}))); + HloInstruction::CreateConstant(Literal::CreateR2<float>({{1}, {2}}))); // 3x2 HloInstruction* const1 = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR2<float>({{1, 2}, {3, 4}, {5, 6}}))); + Literal::CreateR2<float>({{1, 2}, {3, 4}, {5, 6}}))); HloInstruction* transpose0 = builder.AddInstruction(HloInstruction::CreateTranspose( ShapeUtil::MakeShape(F32, {1, 2}), const0, {1, 0})); @@ -130,11 +130,11 @@ TEST_F(TransposeFoldingTest, FuseDotWithConstantOperands) { auto builder = HloComputation::Builder("entry"); // (1.0 + 2.0) * (2.0 - 3.0) HloInstruction* const1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); HloInstruction* const2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); HloInstruction* const3 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(3.0))); HloInstruction* add = builder.AddInstruction(HloInstruction::CreateBinary( const1->shape(), HloOpcode::kAdd, const1, const2)); HloInstruction* sub = builder.AddInstruction(HloInstruction::CreateBinary( diff --git a/tensorflow/compiler/xla/service/tuple_points_to_analysis_test.cc b/tensorflow/compiler/xla/service/tuple_points_to_analysis_test.cc index f75487dd74..d25e5adee3 100644 --- a/tensorflow/compiler/xla/service/tuple_points_to_analysis_test.cc +++ b/tensorflow/compiler/xla/service/tuple_points_to_analysis_test.cc @@ -124,9 +124,9 @@ class TuplePointsToAnalysisTest : public HloTestBase { TEST_F(TuplePointsToAnalysisTest, SimpleTuple) { auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); auto tuple = builder.AddInstruction( HloInstruction::CreateTuple({constant1, constant2})); @@ -177,14 +177,14 @@ TEST_F(TuplePointsToAnalysisTest, NestedTuple) { // tuple. auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); auto inner_tuple = builder.AddInstruction( HloInstruction::CreateTuple({constant1, constant2})); auto constant3 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(3.0))); auto tuple = builder.AddInstruction( HloInstruction::CreateTuple({inner_tuple, constant3})); @@ -238,14 +238,14 @@ TEST_F(TuplePointsToAnalysisTest, GetTupleElement) { // tuple. 
auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); auto inner_tuple = builder.AddInstruction( HloInstruction::CreateTuple({constant1, constant2})); auto constant3 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(3.0))); auto tuple = builder.AddInstruction( HloInstruction::CreateTuple({inner_tuple, constant3})); @@ -270,7 +270,7 @@ TEST_F(TuplePointsToAnalysisTest, DuplicatedElement) { // Create a tuple which contains duplicate elements. auto builder = HloComputation::Builder(TestName()); auto constant = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto tuple = builder.AddInstruction( HloInstruction::CreateTuple({constant, constant, constant})); @@ -291,9 +291,9 @@ TEST_F(TuplePointsToAnalysisTest, TupleCopy) { // the same. auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); auto tuple = builder.AddInstruction( HloInstruction::CreateTuple({constant1, constant2})); auto copy = builder.AddInstruction( @@ -318,16 +318,16 @@ TEST_F(TuplePointsToAnalysisTest, TupleSelect) { // set containing the union of both sides. auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); auto tuple1 = builder.AddInstruction( HloInstruction::CreateTuple({constant1, constant2})); auto tuple2 = builder.AddInstruction( HloInstruction::CreateTuple({constant2, constant2})); auto pred = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))); + HloInstruction::CreateConstant(Literal::CreateR0<bool>(false))); auto select = builder.AddInstruction(HloInstruction::CreateTernary( tuple1->shape(), HloOpcode::kSelect, pred, tuple1, tuple2)); @@ -356,7 +356,7 @@ TEST_F(TuplePointsToAnalysisTest, SelectTupleParameters) { auto param1 = builder.AddInstruction( HloInstruction::CreateParameter(1, tuple_shape, "param1")); auto pred = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))); + HloInstruction::CreateConstant(Literal::CreateR0<bool>(false))); auto select = builder.AddInstruction(HloInstruction::CreateTernary( tuple_shape, HloOpcode::kSelect, pred, param0, param1)); auto copy = builder.AddInstruction( @@ -396,16 +396,16 @@ TEST_F(TuplePointsToAnalysisTest, UnambiguousTupleSelect) { // Select from two identical tuples. The result should not be ambiguous. 
auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); auto tuple1 = builder.AddInstruction( HloInstruction::CreateTuple({constant1, constant2})); auto tuple2 = builder.AddInstruction( HloInstruction::CreateTuple({constant1, constant2})); auto pred = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))); + HloInstruction::CreateConstant(Literal::CreateR0<bool>(false))); auto select = builder.AddInstruction(HloInstruction::CreateTernary( tuple1->shape(), HloOpcode::kSelect, pred, tuple1, tuple2)); @@ -427,9 +427,9 @@ TEST_F(TuplePointsToAnalysisTest, NestedTupleSelect) { // the right values. auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); auto inner_tuple1 = builder.AddInstruction( HloInstruction::CreateTuple({constant1, constant2})); auto inner_tuple2 = builder.AddInstruction( @@ -441,7 +441,7 @@ TEST_F(TuplePointsToAnalysisTest, NestedTupleSelect) { builder.AddInstruction(HloInstruction::CreateTuple({inner_tuple2})); auto pred = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))); + HloInstruction::CreateConstant(Literal::CreateR0<bool>(false))); auto select = builder.AddInstruction(HloInstruction::CreateTernary( tuple1->shape(), HloOpcode::kSelect, pred, tuple1, tuple2)); @@ -474,9 +474,9 @@ TEST_F(TuplePointsToAnalysisTest, TupleWithBitcast) { // have the operand of the bitcast in its points-to set. auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); auto bitcast = builder.AddInstruction(HloInstruction::CreateUnary( constant2->shape(), HloOpcode::kBitcast, constant2)); auto tuple = @@ -510,10 +510,9 @@ TEST_F(TuplePointsToAnalysisTest, PointsToTupleConstantElements) { // Construct a tuple constant and kCopy it. Verify the points-to set of the // copy correctly correctly points into the nested elements of the constant. 
auto builder = HloComputation::Builder(TestName()); - auto tuple_constant = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::MakeTuple( - {LiteralUtil::CreateR2<float>({{1.0}, {2.0}}).get(), - LiteralUtil::CreateR1<float>({2.0, 42}).get()}))); + auto tuple_constant = builder.AddInstruction(HloInstruction::CreateConstant( + Literal::MakeTuple({Literal::CreateR2<float>({{1.0}, {2.0}}).get(), + Literal::CreateR1<float>({2.0, 42}).get()}))); auto copy = builder.AddInstruction(HloInstruction::CreateUnary( tuple_constant->shape(), HloOpcode::kCopy, tuple_constant)); @@ -533,9 +532,9 @@ TEST_F(TuplePointsToAnalysisTest, BufferAliases) { // times. Verify buffer alias sets. auto builder = HloComputation::Builder(TestName()); auto constant1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0))); auto constant2 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(2.0))); auto inner_tuple = builder.AddInstruction( HloInstruction::CreateTuple({constant1, constant2})); auto tuple = builder.AddInstruction( @@ -574,7 +573,7 @@ class FusionPointsToAnalysisTest : public TuplePointsToAnalysisTest { auto tuple_element1 = builder.AddInstruction( HloInstruction::CreateGetTupleElement(update_shape, tuple_param0, 1)); auto ones = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR1<float>({1.f, 1.f, 1.f, 1.f}))); + Literal::CreateR1<float>({1.f, 1.f, 1.f, 1.f}))); // Create 'update' = Add(GetTupleElement(tuple_param0, 1), ones) auto update = builder.AddInstruction(HloInstruction::CreateBinary( update_shape, HloOpcode::kAdd, tuple_element1, ones)); diff --git a/tensorflow/compiler/xla/service/user_computation.cc b/tensorflow/compiler/xla/service/user_computation.cc index 2387e02f03..1f6e789379 100644 --- a/tensorflow/compiler/xla/service/user_computation.cc +++ b/tensorflow/compiler/xla/service/user_computation.cc @@ -2373,7 +2373,7 @@ void ComputationLowerer::Visit( const ConstantRequest& constant_request = request.request().constant_request(); hlo_instruction = add_instruction(HloInstruction::CreateConstant( - LiteralUtil::CloneToUnique(Literal(constant_request.literal())))); + Literal(constant_request.literal()).CloneToUnique())); break; } diff --git a/tensorflow/compiler/xla/service/user_computation_test.cc b/tensorflow/compiler/xla/service/user_computation_test.cc index ea69120126..41bb641f43 100644 --- a/tensorflow/compiler/xla/service/user_computation_test.cc +++ b/tensorflow/compiler/xla/service/user_computation_test.cc @@ -50,7 +50,7 @@ TEST_F(UserComputationTest, SimpleComputation) { ConstantRequest constant_request; *constant_request.mutable_literal() = - LiteralUtil::CreateR1<float>({123.0f, 42.0f})->ToProto(); + Literal::CreateR1<float>({123.0f, 42.0f})->ToProto(); TF_ASSIGN_OR_ASSERT_OK(ComputationDataHandle constant_handle, computation.AddConstantInstruction(constant_request)); @@ -161,12 +161,12 @@ TEST_F(UserComputationTest, EliminateScalarBroadcast) { ConstantRequest a_request; *a_request.mutable_literal() = - LiteralUtil::CreateR1<float>({123.0f, 42.0f})->ToProto(); + Literal::CreateR1<float>({123.0f, 42.0f})->ToProto(); TF_ASSIGN_OR_ASSERT_OK(ComputationDataHandle a_handle, computation.AddConstantInstruction(a_request)); ConstantRequest b_request; - *b_request.mutable_literal() = LiteralUtil::CreateR0<float>(1.0f)->ToProto(); + 
*b_request.mutable_literal() = Literal::CreateR0<float>(1.0f)->ToProto(); TF_ASSIGN_OR_ASSERT_OK(ComputationDataHandle b_handle, computation.AddConstantInstruction(b_request)); diff --git a/tensorflow/compiler/xla/tests/array_elementwise_ops_test.cc b/tensorflow/compiler/xla/tests/array_elementwise_ops_test.cc index f43d4b6f57..25d9fbf8c4 100644 --- a/tensorflow/compiler/xla/tests/array_elementwise_ops_test.cc +++ b/tensorflow/compiler/xla/tests/array_elementwise_ops_test.cc @@ -157,13 +157,13 @@ TEST_P(ArrayElementwiseOpTestParamCount, AddManyValues) { b_values.push_back(2 * i / static_cast<float>(count + 2)); } - std::unique_ptr<Literal> a_literal = LiteralUtil::CreateR1<float>({a_values}); + std::unique_ptr<Literal> a_literal = Literal::CreateR1<float>({a_values}); std::unique_ptr<GlobalData> a_data = client_->TransferToServer(*a_literal).ConsumeValueOrDie(); auto a_constant = builder.ConstantR1<float>(a_values); auto a_param = builder.Parameter(0, a_literal->shape(), "a_param"); - std::unique_ptr<Literal> b_literal = LiteralUtil::CreateR1<float>({b_values}); + std::unique_ptr<Literal> b_literal = Literal::CreateR1<float>({b_values}); std::unique_ptr<GlobalData> b_data = client_->TransferToServer(*b_literal).ConsumeValueOrDie(); auto b_constant = builder.Parameter(1, a_literal->shape(), "b_param"); @@ -803,7 +803,7 @@ TEST_F(ArrayElementwiseOpTest, PowSpecialF32) { std::vector<float> values = {1.0f, 2.0f, 3.2f, -4.0f}; std::vector<float> exponents = {0.0f, 1.0f, 2.0f, 0.5f, -1.0f, -0.5f}; - std::unique_ptr<Literal> param_literal = LiteralUtil::CreateR1<float>(values); + std::unique_ptr<Literal> param_literal = Literal::CreateR1<float>(values); std::unique_ptr<GlobalData> param_data = client_->TransferToServer(*param_literal).ConsumeValueOrDie(); @@ -1240,12 +1240,12 @@ TEST_F(ArrayElementwiseOpTest, AddTwoParametersF32s) { ComputationBuilder builder(client_, TestName()); std::unique_ptr<Literal> param0_literal = - LiteralUtil::CreateR1<float>({1.1f, 2.2f, 3.3f, 5.5f}); + Literal::CreateR1<float>({1.1f, 2.2f, 3.3f, 5.5f}); std::unique_ptr<GlobalData> param0_data = client_->TransferToServer(*param0_literal).ConsumeValueOrDie(); std::unique_ptr<Literal> param1_literal = - LiteralUtil::CreateR1<float>({7.2f, 2.3f, 3.4f, 5.6f}); + Literal::CreateR1<float>({7.2f, 2.3f, 3.4f, 5.6f}); std::unique_ptr<GlobalData> param1_data = client_->TransferToServer(*param1_literal).ConsumeValueOrDie(); @@ -1262,12 +1262,12 @@ XLA_TEST_F(ArrayElementwiseOpTest, AddTwoParametersZeroElementF32s) { ComputationBuilder builder(client_, TestName()); std::unique_ptr<Literal> param0_literal = - LiteralUtil::CreateR3FromArray3D<float>(Array3D<float>(0, 7, 0)); + Literal::CreateR3FromArray3D<float>(Array3D<float>(0, 7, 0)); std::unique_ptr<GlobalData> param0_data = client_->TransferToServer(*param0_literal).ConsumeValueOrDie(); std::unique_ptr<Literal> param1_literal = - LiteralUtil::CreateR3FromArray3D<float>(Array3D<float>(0, 7, 0)); + Literal::CreateR3FromArray3D<float>(Array3D<float>(0, 7, 0)); std::unique_ptr<GlobalData> param1_data = client_->TransferToServer(*param1_literal).ConsumeValueOrDie(); @@ -1284,7 +1284,7 @@ TEST_F(ArrayElementwiseOpTest, AddParameterToConstantF32s) { ComputationBuilder builder(client_, TestName()); std::unique_ptr<Literal> param0_literal = - LiteralUtil::CreateR1<float>({1.1f, 2.2f, 3.3f, 5.5f}); + Literal::CreateR1<float>({1.1f, 2.2f, 3.3f, 5.5f}); std::unique_ptr<GlobalData> param0_data = client_->TransferToServer(*param0_literal).ConsumeValueOrDie(); @@ -1455,9 +1455,9 @@ 
XLA_TEST_F(ArrayElementwiseOpTest, Compare1DTo2DS32Eq) { auto cmp_dim_1 = builder.Eq(v, m, /*broadcast_dimensions=*/{0}); auto result = builder.Tuple({cmp_dim_0, cmp_dim_1}); - auto expected = LiteralUtil::MakeTuple( - {LiteralUtil::CreateR2<bool>({{true, true}, {true, false}}).get(), - LiteralUtil::CreateR2<bool>({{true, false}, {false, false}}).get()}); + auto expected = Literal::MakeTuple( + {Literal::CreateR2<bool>({{true, true}, {true, false}}).get(), + Literal::CreateR2<bool>({{true, false}, {false, false}}).get()}); ComputeAndCompareTuple(&builder, *expected, {}, error_spec_); } @@ -1810,7 +1810,7 @@ TEST_F(ArrayElementwiseOpTest, R4_16x16x2x2_Plus_R1_16) { std::iota(r1.begin(), r1.end(), 1.0); ComputationBuilder builder(client_, TestName()); - std::unique_ptr<Literal> a_literal = LiteralUtil::CreateR4FromArray4D(r4); + std::unique_ptr<Literal> a_literal = Literal::CreateR4FromArray4D(r4); *a_literal->mutable_shape()->mutable_layout() = LayoutUtil::MakeLayout({0, 1, 2, 3}); auto a = builder.ConstantLiteral(*a_literal); @@ -1846,8 +1846,8 @@ TEST_F(ArrayElementwiseOpTest, CannotAddOpaques) { // broadcast. TEST_F(ArrayElementwiseOpTest, ImplictBroadcastInFusedExpressions) { ComputationBuilder builder(client_, TestName()); - auto x_literal = LiteralUtil::CreateR1<float>({1, 2, 3}); - auto y_literal = LiteralUtil::CreateR1<float>({4, 5}); + auto x_literal = Literal::CreateR1<float>({1, 2, 3}); + auto y_literal = Literal::CreateR1<float>({4, 5}); auto x_data = client_->TransferToServer(*x_literal).ConsumeValueOrDie(); auto y_data = client_->TransferToServer(*y_literal).ConsumeValueOrDie(); @@ -1877,7 +1877,7 @@ XLA_TEST_F(ArrayElementwiseOpTest, ReducePrecisionNoOpParamF32) { std::vector<float> a_values = {-2.5f, 25.5f}; - std::unique_ptr<Literal> a_literal = LiteralUtil::CreateR1<float>({a_values}); + std::unique_ptr<Literal> a_literal = Literal::CreateR1<float>({a_values}); std::unique_ptr<GlobalData> a_data = client_->TransferToServer(*a_literal).ConsumeValueOrDie(); auto a_param = builder.Parameter(0, a_literal->shape(), "a_param"); diff --git a/tensorflow/compiler/xla/tests/batch_normalization_test.cc b/tensorflow/compiler/xla/tests/batch_normalization_test.cc index b9b051b78c..9f9f541230 100644 --- a/tensorflow/compiler/xla/tests/batch_normalization_test.cc +++ b/tensorflow/compiler/xla/tests/batch_normalization_test.cc @@ -47,7 +47,7 @@ class BatchNormalizationTest : public ClientLibraryTestBase { {5.0f, 4.4f}, // p2 }); input_array_.FillWithPZ(pz); - input_literal_ = *LiteralUtil::CreateR4FromArray4D(input_array_); + input_literal_ = *Literal::CreateR4FromArray4D(input_array_); CHECK_EQ(kSamples, input_array_.planes()); CHECK_EQ(kZ, input_array_.depth()); CHECK_EQ(kY, input_array_.height()); diff --git a/tensorflow/compiler/xla/tests/broadcast_simple_test.cc b/tensorflow/compiler/xla/tests/broadcast_simple_test.cc index b8c35cad47..aab2c74634 100644 --- a/tensorflow/compiler/xla/tests/broadcast_simple_test.cc +++ b/tensorflow/compiler/xla/tests/broadcast_simple_test.cc @@ -62,9 +62,8 @@ class BroadcastSimpleTest : public ClientLibraryTestBase { Array3D<float>* r3_array, float start, float end, int seed) { *r3_shape = ShapeUtil::MakeShapeWithLayout(F32, bounds, minor_to_major); r3_array->FillRandom(start, end, seed); - auto r3_data = - LiteralUtil::Relayout(*LiteralUtil::CreateR3FromArray3D(*r3_array), - LayoutUtil::MakeLayout(minor_to_major)); + auto r3_data = Literal::CreateR3FromArray3D(*r3_array)->Relayout( + LayoutUtil::MakeLayout(minor_to_major)); std::unique_ptr<GlobalData> 
r3_global_data = client_->TransferToServer(*r3_data).ConsumeValueOrDie(); return r3_global_data; @@ -76,9 +75,8 @@ class BroadcastSimpleTest : public ClientLibraryTestBase { Array2D<float>* r2_array, float start, float end, int seed) { *r2_shape = ShapeUtil::MakeShapeWithLayout(F32, bounds, minor_to_major); r2_array->FillRandom(start, end, seed); - auto r2_data = - LiteralUtil::Relayout(*LiteralUtil::CreateR2FromArray2D(*r2_array), - LayoutUtil::MakeLayout(minor_to_major)); + auto r2_data = Literal::CreateR2FromArray2D(*r2_array)->Relayout( + LayoutUtil::MakeLayout(minor_to_major)); std::unique_ptr<GlobalData> r2_global_data = client_->TransferToServer(*r2_data).ConsumeValueOrDie(); return r2_global_data; @@ -216,13 +214,13 @@ XLA_TEST_F(BroadcastSimpleTest, InDimensionAndDegenerateBroadcasting) { ComputationBuilder b(client_, TestName()); b.Add(b.ConstantR2<float>({{1.0, 5.0}}), - b.ConstantLiteral(*LiteralUtil::CreateR3<float>( + b.ConstantLiteral(*Literal::CreateR3<float>( {{{2.0}, {3.0}, {4.0}}, {{5.0}, {6.0}, {7.0}}})), /*broadcast_dimensions=*/{1, 2}); auto expected = - LiteralUtil::CreateR3<float>({{{3.0, 7.0}, {4.0, 8.0}, {5.0, 9.0}}, - {{6.0, 10.0}, {7.0, 11.0}, {8.0, 12.0}}}); + Literal::CreateR3<float>({{{3.0, 7.0}, {4.0, 8.0}, {5.0, 9.0}}, + {{6.0, 10.0}, {7.0, 11.0}, {8.0, 12.0}}}); ComputeAndCompareLiteral(&b, *expected, {}, ErrorSpec(0.0001)); } @@ -291,7 +289,7 @@ XLA_TEST_P(BroadcastR3ImplicitTest, Doit) { } } } - auto expected = LiteralUtil::CreateR3FromArray3D(expected_array); + auto expected = Literal::CreateR3FromArray3D(expected_array); ComputeAndCompareLiteral( &builder, *expected, {r3_implicit_global_data.get(), r3_global_data.get()}, @@ -316,7 +314,7 @@ XLA_TEST_F(BroadcastSimpleTest, Add3DTo3DDegenerate_1_2) { b.Add(r3h, r1h); auto expected = - LiteralUtil::CreateR3<float>({{{2, 3}, {4, 5}}, {{7, 8}, {9, 10}}}); + Literal::CreateR3<float>({{{2, 3}, {4, 5}}, {{7, 8}, {9, 10}}}); ComputeAndCompareLiteral(&b, *expected, {r3.get(), r1.get()}, ErrorSpec(0.0001)); @@ -324,81 +322,79 @@ XLA_TEST_F(BroadcastSimpleTest, Add3DTo3DDegenerate_1_2) { XLA_TEST_F(BroadcastSimpleTest, Add3DTo3DDegenerate_0_1) { ComputationBuilder b(client_, TestName()); - auto r1 = b.ConstantLiteral(*LiteralUtil::CreateR3<float>({{{1, 2}}})); + auto r1 = b.ConstantLiteral(*Literal::CreateR3<float>({{{1, 2}}})); auto r3 = b.ConstantLiteral( - *LiteralUtil::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}})); + *Literal::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}})); b.Add(r3, r1); auto expected = - LiteralUtil::CreateR3<float>({{{2, 4}, {4, 6}}, {{6, 8}, {8, 10}}}); + Literal::CreateR3<float>({{{2, 4}, {4, 6}}, {{6, 8}, {8, 10}}}); ComputeAndCompareLiteral(&b, *expected, {}, ErrorSpec(0.0001)); } XLA_TEST_F(BroadcastSimpleTest, Add3DTo3DDegenerate_0_2) { ComputationBuilder b(client_, TestName()); - auto r1 = b.ConstantLiteral(*LiteralUtil::CreateR3<float>({{{1}, {2}}})); + auto r1 = b.ConstantLiteral(*Literal::CreateR3<float>({{{1}, {2}}})); auto r3 = b.ConstantLiteral( - *LiteralUtil::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}})); + *Literal::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}})); b.Add(r3, r1); auto expected = - LiteralUtil::CreateR3<float>({{{2, 3}, {5, 6}}, {{6, 7}, {9, 10}}}); + Literal::CreateR3<float>({{{2, 3}, {5, 6}}, {{6, 7}, {9, 10}}}); ComputeAndCompareLiteral(&b, *expected, {}, ErrorSpec(0.0001)); } XLA_TEST_F(BroadcastSimpleTest, Add3DTo3DDegenerate_0) { ComputationBuilder b(client_, TestName()); - auto r1 = - 
b.ConstantLiteral(*LiteralUtil::CreateR3<float>({{{1, 2}, {3, 4}}})); + auto r1 = b.ConstantLiteral(*Literal::CreateR3<float>({{{1, 2}, {3, 4}}})); auto r3 = b.ConstantLiteral( - *LiteralUtil::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}})); + *Literal::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}})); b.Add(r3, r1); auto expected = - LiteralUtil::CreateR3<float>({{{2, 4}, {6, 8}}, {{6, 8}, {10, 12}}}); + Literal::CreateR3<float>({{{2, 4}, {6, 8}}, {{6, 8}, {10, 12}}}); ComputeAndCompareLiteral(&b, *expected, {}, ErrorSpec(0.0001)); } XLA_TEST_F(BroadcastSimpleTest, Add3DTo3DDegenerate_1) { ComputationBuilder b(client_, TestName()); - auto r1 = - b.ConstantLiteral(*LiteralUtil::CreateR3<float>({{{1, 2}}, {{3, 4}}})); + auto r1 = b.ConstantLiteral(*Literal::CreateR3<float>({{{1, 2}}, {{3, 4}}})); auto r3 = b.ConstantLiteral( - *LiteralUtil::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}})); + *Literal::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}})); b.Add(r3, r1); auto expected = - LiteralUtil::CreateR3<float>({{{2, 4}, {4, 6}}, {{8, 10}, {10, 12}}}); + Literal::CreateR3<float>({{{2, 4}, {4, 6}}, {{8, 10}, {10, 12}}}); ComputeAndCompareLiteral(&b, *expected, {}, ErrorSpec(0.0001)); } XLA_TEST_F(BroadcastSimpleTest, Add3DTo3DDegenerate_2) { ComputationBuilder b(client_, TestName()); - auto r1 = b.ConstantLiteral( - *LiteralUtil::CreateR3<float>({{{1}, {2}}, {{3}, {4}}})); + auto r1 = + b.ConstantLiteral(*Literal::CreateR3<float>({{{1}, {2}}, {{3}, {4}}})); auto r3 = b.ConstantLiteral( - *LiteralUtil::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}})); + *Literal::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}})); b.Add(r3, r1); auto expected = - LiteralUtil::CreateR3<float>({{{2, 3}, {5, 6}}, {{8, 9}, {11, 12}}}); + Literal::CreateR3<float>({{{2, 3}, {5, 6}}, {{8, 9}, {11, 12}}}); ComputeAndCompareLiteral(&b, *expected, {}, ErrorSpec(0.0001)); } XLA_TEST_F(BroadcastSimpleTest, Add3DTo3DDegenerate_0_1_2) { ComputationBuilder b(client_, TestName()); - auto r1 = b.ConstantLiteral(*LiteralUtil::CreateR3<float>({{{1}}})); + auto r1 = b.ConstantLiteral(*Literal::CreateR3<float>({{{1}}})); auto r3 = b.ConstantLiteral( - *LiteralUtil::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}})); + *Literal::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}})); b.Add(r3, r1); auto expected = - LiteralUtil::CreateR3<float>({{{2, 3}, {4, 5}}, {{6, 7}, {8, 9}}}); + Literal::CreateR3<float>({{{2, 3}, {4, 5}}, {{6, 7}, {8, 9}}}); ComputeAndCompareLiteral(&b, *expected, {}, ErrorSpec(0.0001)); } @@ -540,7 +536,7 @@ XLA_TEST_P(BroadcastR2ImplicitTest, Doit) { *v = ApplyOpToFloats(spec.op2, tmp, v3); }); - auto expected = LiteralUtil::CreateR2FromArray2D(expected_array); + auto expected = Literal::CreateR2FromArray2D(expected_array); ComputeAndCompareLiteral( &builder, *expected, {r2_implicit_global_data1.get(), r2_global_data.get(), @@ -554,22 +550,22 @@ INSTANTIATE_TEST_CASE_P(BroadcastR2ImplicitTestInstances, XLA_TEST_F(BroadcastSimpleTest, Add2DTo2DDegenerate_0) { ComputationBuilder b(client_, TestName()); - auto r1 = b.ConstantLiteral(*LiteralUtil::CreateR2<float>({{1, 2}})); - auto r2 = b.ConstantLiteral(*LiteralUtil::CreateR2<float>({{1, 2}, {3, 4}})); + auto r1 = b.ConstantLiteral(*Literal::CreateR2<float>({{1, 2}})); + auto r2 = b.ConstantLiteral(*Literal::CreateR2<float>({{1, 2}, {3, 4}})); b.Add(r2, r1); - auto expected = LiteralUtil::CreateR2<float>({{2, 4}, {4, 6}}); + auto expected = Literal::CreateR2<float>({{2, 4}, {4, 6}}); ComputeAndCompareLiteral(&b, 
*expected, {}, ErrorSpec(0.0001)); } XLA_TEST_F(BroadcastSimpleTest, Add2DTo2DDegenerate_1) { ComputationBuilder b(client_, TestName()); - auto r1 = b.ConstantLiteral(*LiteralUtil::CreateR2<float>({{1}, {2}})); - auto r2 = b.ConstantLiteral(*LiteralUtil::CreateR2<float>({{1, 2}, {3, 4}})); + auto r1 = b.ConstantLiteral(*Literal::CreateR2<float>({{1}, {2}})); + auto r2 = b.ConstantLiteral(*Literal::CreateR2<float>({{1, 2}, {3, 4}})); b.Add(r2, r1); - auto expected = LiteralUtil::CreateR2<float>({{2, 3}, {5, 6}}); + auto expected = Literal::CreateR2<float>({{2, 3}, {5, 6}}); ComputeAndCompareLiteral(&b, *expected, {}, ErrorSpec(0.0001)); } @@ -578,11 +574,11 @@ XLA_TEST_F(BroadcastSimpleTest, Add1DTo3DInDim0) { ComputationBuilder b(client_, TestName()); auto r1 = b.ConstantR1<float>({10, 20}); auto r3 = b.ConstantLiteral( - *LiteralUtil::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}})); + *Literal::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}})); b.Add(r3, r1, {0}); - auto expected = LiteralUtil::CreateR3<float>( - {{{11, 12}, {13, 14}}, {{25, 26}, {27, 28}}}); + auto expected = + Literal::CreateR3<float>({{{11, 12}, {13, 14}}, {{25, 26}, {27, 28}}}); ComputeAndCompareLiteral(&b, *expected, {}, ErrorSpec(0.0001)); } @@ -591,11 +587,11 @@ XLA_TEST_F(BroadcastSimpleTest, Add1DTo3DInDim1) { ComputationBuilder b(client_, TestName()); auto r1 = b.ConstantR1<float>({10, 20}); auto r3 = b.ConstantLiteral( - *LiteralUtil::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}})); + *Literal::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}})); b.Add(r1, r3, {1}); - auto expected = LiteralUtil::CreateR3<float>( - {{{11, 12}, {23, 24}}, {{15, 16}, {27, 28}}}); + auto expected = + Literal::CreateR3<float>({{{11, 12}, {23, 24}}, {{15, 16}, {27, 28}}}); ComputeAndCompareLiteral(&b, *expected, {}, ErrorSpec(0.0001)); } @@ -604,11 +600,11 @@ XLA_TEST_F(BroadcastSimpleTest, Add1DTo3DInDim2) { ComputationBuilder b(client_, TestName()); auto r1 = b.ConstantR1<float>({10, 20}); auto r3 = b.ConstantLiteral( - *LiteralUtil::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}})); + *Literal::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}})); b.Add(r1, r3, {2}); - auto expected = LiteralUtil::CreateR3<float>( - {{{11, 22}, {13, 24}}, {{15, 26}, {17, 28}}}); + auto expected = + Literal::CreateR3<float>({{{11, 22}, {13, 24}}, {{15, 26}, {17, 28}}}); ComputeAndCompareLiteral(&b, *expected, {}, ErrorSpec(0.0001)); } @@ -619,7 +615,7 @@ XLA_TEST_F(BroadcastSimpleTest, Add1DTo3DInDimAll) { auto r1_1 = b.ConstantR1<float>({100, 200}); auto r1_2 = b.ConstantR1<float>({10, 20}); auto r3 = b.ConstantLiteral( - *LiteralUtil::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}})); + *Literal::CreateR3<float>({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}})); for (int i = 0; i < 3; ++i) { r3 = b.Add(r1_0, r3, {0}); r3 = b.Add(r3, r1_1, {1}); @@ -627,7 +623,7 @@ XLA_TEST_F(BroadcastSimpleTest, Add1DTo3DInDimAll) { } r3 = b.Mul(r3, b.ConstantR0<float>(-2)); - auto expected = LiteralUtil::CreateR3<float>( + auto expected = Literal::CreateR3<float>( {{{-6 * 1110 - 2, -6 * 1120 - 4}, {-6 * 1210 - 6, -6 * 1220 - 8}}, {{-6 * 2110 - 10, -6 * 2120 - 12}, {-6 * 2210 - 14, -6 * 2220 - 16}}}); @@ -648,7 +644,7 @@ XLA_TEST_F(BroadcastSimpleTest, Add1DTo3DInDimAllWithScalarBroadcast) { } r3 = b.Mul(r3, b.ConstantR0<float>(-1)); - auto expected = LiteralUtil::CreateR3<float>( + auto expected = Literal::CreateR3<float>( {{{-3 * 1110 - 3, -3 * 1120 - 3}, {-3 * 1210 - 3, -3 * 1220 - 3}}, {{-3 * 2110 - 3, -3 * 2120 - 3}, {-3 * 2210 - 3, 
-3 * 2220 - 3}}}); @@ -661,7 +657,7 @@ XLA_TEST_F(BroadcastSimpleTest, InvalidBinaryAndDegenerateBroadcasting) { ComputationBuilder b(client_, TestName()); b.Add(b.ConstantR2<float>({{1.0, 5.0}, {1.0, 5.0}}), - b.ConstantLiteral(*LiteralUtil::CreateR3<float>( + b.ConstantLiteral(*Literal::CreateR3<float>( {{{2.0}, {3.0}, {4.0}}, {{5.0}, {6.0}, {7.0}}})), /*broadcast_dimensions=*/{1, 2}); diff --git a/tensorflow/compiler/xla/tests/broadcast_test.cc b/tensorflow/compiler/xla/tests/broadcast_test.cc index 820f0ab6f3..dc1443f536 100644 --- a/tensorflow/compiler/xla/tests/broadcast_test.cc +++ b/tensorflow/compiler/xla/tests/broadcast_test.cc @@ -38,7 +38,7 @@ XLA_TEST_F(BroadcastTest, BroadcastScalarToScalar) { // Test degenerate case of broadcasting a scalar into a scalar. auto builder = HloComputation::Builder(TestName()); auto input = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0))); builder.AddInstruction(HloInstruction::CreateBroadcast( ShapeUtil::MakeShape(F32, {}), input, {})); @@ -47,14 +47,14 @@ XLA_TEST_F(BroadcastTest, BroadcastScalarToScalar) { hlo_module->AddEntryComputation(builder.Build()); auto result = ExecuteAndTransfer(std::move(hlo_module), {}); - LiteralTestUtil::ExpectNear(*LiteralUtil::CreateR0<float>(42.0), *result, + LiteralTestUtil::ExpectNear(*Literal::CreateR0<float>(42.0), *result, error_spec_); } XLA_TEST_F(BroadcastTest, BroadcastScalarTo2D) { auto builder = HloComputation::Builder(TestName()); auto input = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0))); builder.AddInstruction(HloInstruction::CreateBroadcast( ShapeUtil::MakeShape(F32, {2, 2}), input, {})); @@ -64,14 +64,14 @@ XLA_TEST_F(BroadcastTest, BroadcastScalarTo2D) { auto result = ExecuteAndTransfer(std::move(hlo_module), {}); LiteralTestUtil::ExpectNear( - *LiteralUtil::CreateR2<float>({{42.0, 42.0}, {42.0, 42.0}}), *result, + *Literal::CreateR2<float>({{42.0, 42.0}, {42.0, 42.0}}), *result, error_spec_); } XLA_TEST_F(BroadcastTest, BroadcastVectorTo2D) { auto builder = HloComputation::Builder(TestName()); auto input = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0}))); + Literal::CreateR1<float>({1.0, 2.0, 3.0}))); // Broadcast vector in both dimension 0 and dimension 1. Join them in a tuple // to enable testing of the results. 
@@ -87,18 +87,18 @@ XLA_TEST_F(BroadcastTest, BroadcastVectorTo2D) { auto result = ExecuteAndTransfer(std::move(hlo_module), {}); LiteralTestUtil::ExpectNear( - *LiteralUtil::CreateR2<float>({{1.0, 1.0}, {2.0, 2.0}, {3.0, 3.0}}), + *Literal::CreateR2<float>({{1.0, 1.0}, {2.0, 2.0}, {3.0, 3.0}}), result->tuple_literals(0), error_spec_); LiteralTestUtil::ExpectNear( - *LiteralUtil::CreateR2<float>({{1.0, 2.0, 3.0}, {1.0, 2.0, 3.0}}), + *Literal::CreateR2<float>({{1.0, 2.0, 3.0}, {1.0, 2.0, 3.0}}), result->tuple_literals(1), error_spec_); } XLA_TEST_F(BroadcastTest, Broadcast2DTo2D) { auto builder = HloComputation::Builder(TestName()); auto input = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}))); + Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}))); builder.AddInstruction(HloInstruction::CreateBroadcast( ShapeUtil::MakeShape(F32, {2, 2}), input, {0, 1})); @@ -108,7 +108,7 @@ XLA_TEST_F(BroadcastTest, Broadcast2DTo2D) { auto result = ExecuteAndTransfer(std::move(hlo_module), {}); LiteralTestUtil::ExpectNear( - *LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}), *result, + *Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}), *result, error_spec_); } @@ -117,7 +117,7 @@ XLA_TEST_F(BroadcastTest, Broadcast2DTo2DTranspose) { // the dimensions, ie transpose. auto builder = HloComputation::Builder(TestName()); auto input = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}))); + Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}))); builder.AddInstruction(HloInstruction::CreateBroadcast( ShapeUtil::MakeShape(F32, {2, 2}), input, {1, 0})); @@ -127,14 +127,14 @@ XLA_TEST_F(BroadcastTest, Broadcast2DTo2DTranspose) { auto result = ExecuteAndTransfer(std::move(hlo_module), {}); LiteralTestUtil::ExpectNear( - *LiteralUtil::CreateR2<float>({{1.0, 3.0}, {2.0, 4.0}}), *result, + *Literal::CreateR2<float>({{1.0, 3.0}, {2.0, 4.0}}), *result, error_spec_); } XLA_TEST_F(BroadcastTest, Broadcast2DTo3D) { auto builder = HloComputation::Builder(TestName()); auto input = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}))); + Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}))); builder.AddInstruction(HloInstruction::CreateBroadcast( ShapeUtil::MakeShape(F32, {2, 3, 2}), input, {0, 2})); @@ -144,15 +144,15 @@ XLA_TEST_F(BroadcastTest, Broadcast2DTo3D) { auto result = ExecuteAndTransfer(std::move(hlo_module), {}); LiteralTestUtil::ExpectNear( - *LiteralUtil::CreateR3<float>({{{1.0, 2.0}, {1.0, 2.0}, {1.0, 2.0}}, - {{3.0, 4.0}, {3.0, 4.0}, {3.0, 4.0}}}), + *Literal::CreateR3<float>({{{1.0, 2.0}, {1.0, 2.0}, {1.0, 2.0}}, + {{3.0, 4.0}, {3.0, 4.0}, {3.0, 4.0}}}), *result, error_spec_); } TEST_F(BroadcastTest, Broadcast_R1_2_To_R4_2x2x3x3) { auto builder = HloComputation::Builder(TestName()); auto input = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>({1.0, 2.0}))); + HloInstruction::CreateConstant(Literal::CreateR1<float>({1.0, 2.0}))); // Broadcast vector in dimension 1. 
builder.AddInstruction(HloInstruction::CreateBroadcast( @@ -167,8 +167,8 @@ TEST_F(BroadcastTest, Broadcast_R1_2_To_R4_2x2x3x3) { Array2D<float> pz({{1, 2}, {1, 2}}); expected.FillWithPZ(pz); - LiteralTestUtil::ExpectNear( - *LiteralUtil::CreateR4FromArray4D<float>(expected), *result, error_spec_); + LiteralTestUtil::ExpectNear(*Literal::CreateR4FromArray4D<float>(expected), + *result, error_spec_); } TEST_F(BroadcastTest, Broadcast_R1_1025_To_R4_3x3x3x1025) { @@ -177,7 +177,7 @@ TEST_F(BroadcastTest, Broadcast_R1_1025_To_R4_3x3x3x1025) { int64 r1_size = input_data.size(); std::iota(input_data.begin(), input_data.end(), 0.0f); auto input = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(input_data))); + HloInstruction::CreateConstant(Literal::CreateR1<float>(input_data))); // Broadcast vector in dimension 3. builder.AddInstruction(HloInstruction::CreateBroadcast( @@ -197,8 +197,8 @@ TEST_F(BroadcastTest, Broadcast_R1_1025_To_R4_3x3x3x1025) { } expected.FillWithYX(yx); - LiteralTestUtil::ExpectNear( - *LiteralUtil::CreateR4FromArray4D<float>(expected), *result, error_spec_); + LiteralTestUtil::ExpectNear(*Literal::CreateR4FromArray4D<float>(expected), + *result, error_spec_); } XLA_TEST_F(BroadcastTest, Broadcast_R1_64_To_R4_32x64x7x7) { @@ -208,7 +208,7 @@ XLA_TEST_F(BroadcastTest, Broadcast_R1_64_To_R4_32x64x7x7) { std::vector<float> r1_array(64, 42.0); auto input = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(r1_array))); + HloInstruction::CreateConstant(Literal::CreateR1<float>(r1_array))); // Broadcast vector in dimension 1. builder.AddInstruction(HloInstruction::CreateBroadcast( @@ -219,14 +219,14 @@ XLA_TEST_F(BroadcastTest, Broadcast_R1_64_To_R4_32x64x7x7) { hlo_module->AddEntryComputation(builder.Build()); auto result = ExecuteAndTransfer(std::move(hlo_module), {}); - LiteralTestUtil::ExpectNear(*LiteralUtil::CreateR4FromArray4D(r4_array), - *result, error_spec_); + LiteralTestUtil::ExpectNear(*Literal::CreateR4FromArray4D(r4_array), *result, + error_spec_); } TEST_F(BroadcastTest, Broadcast_R0_to_R4_64x64x3x3) { auto builder = HloComputation::Builder(TestName()); auto input = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(1.0f))); builder.AddInstruction(HloInstruction::CreateBroadcast( ShapeUtil::MakeShape(F32, {64, 64, 3, 3}), input, {})); @@ -239,15 +239,15 @@ TEST_F(BroadcastTest, Broadcast_R0_to_R4_64x64x3x3) { Array4D<float> expected(64, 64, 3, 3); expected.Fill(1.0f); - LiteralTestUtil::ExpectNear( - *LiteralUtil::CreateR4FromArray4D<float>(expected), *result, error_spec_); + LiteralTestUtil::ExpectNear(*Literal::CreateR4FromArray4D<float>(expected), + *result, error_spec_); } TEST_F(BroadcastTest, Broadcast_R2_2x2_To_R4_3x3x2x2) { auto builder = HloComputation::Builder(TestName()); Array2D<float> to_broadcast({{1.0f, 2.0f}, {3.0f, 4.0f}}); auto input = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR2FromArray2D<float>(to_broadcast))); + Literal::CreateR2FromArray2D<float>(to_broadcast))); // Broadcast vector in dimensions 2 and 3. 
builder.AddInstruction(HloInstruction::CreateBroadcast( @@ -261,8 +261,8 @@ TEST_F(BroadcastTest, Broadcast_R2_2x2_To_R4_3x3x2x2) { Array4D<float> expected(3, 3, 2, 2); expected.FillWithYX(to_broadcast); - LiteralTestUtil::ExpectNear( - *LiteralUtil::CreateR4FromArray4D<float>(expected), *result, error_spec_); + LiteralTestUtil::ExpectNear(*Literal::CreateR4FromArray4D<float>(expected), + *result, error_spec_); } TEST_F(BroadcastTest, Broadcast_R3_2x3x4_to_R4_2x3x4x5) { @@ -281,7 +281,7 @@ TEST_F(BroadcastTest, Broadcast_R3_2x3x4_to_R4_2x3x4x5) { } } auto input = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR3FromArray3D<float>(input_vals))); + Literal::CreateR3FromArray3D<float>(input_vals))); // Broadcast vector in dimensions 2 and 3. builder.AddInstruction(HloInstruction::CreateBroadcast( @@ -292,8 +292,8 @@ TEST_F(BroadcastTest, Broadcast_R3_2x3x4_to_R4_2x3x4x5) { hlo_module->AddEntryComputation(builder.Build()); auto result = ExecuteAndTransfer(std::move(hlo_module), {}); - LiteralTestUtil::ExpectNear( - *LiteralUtil::CreateR4FromArray4D<float>(expected), *result, error_spec_); + LiteralTestUtil::ExpectNear(*Literal::CreateR4FromArray4D<float>(expected), + *result, error_spec_); } } // namespace diff --git a/tensorflow/compiler/xla/tests/call_test.cc b/tensorflow/compiler/xla/tests/call_test.cc index 0d587a4964..086199fda1 100644 --- a/tensorflow/compiler/xla/tests/call_test.cc +++ b/tensorflow/compiler/xla/tests/call_test.cc @@ -77,7 +77,7 @@ class CallOpTest : public ClientLibraryTestBase { XLA_TEST_F(CallOpTest, DISABLED_ON_GPU(CallR0F32IdentityScalar)) { ComputationBuilder builder(client_, TestName()); Computation callee = CreateR0F32IdentityComputation(); - auto constant = builder.ConstantLiteral(*LiteralUtil::CreateR0<float>(42.0)); + auto constant = builder.ConstantLiteral(*Literal::CreateR0<float>(42.0)); builder.Call(callee, {constant}); ComputeAndCompareR0<float>(&builder, 42.0, {}, ErrorSpec(0.01f)); @@ -86,8 +86,8 @@ XLA_TEST_F(CallOpTest, DISABLED_ON_GPU(CallR0F32IdentityScalar)) { XLA_TEST_F(CallOpTest, DISABLED_ON_GPU(CallR1S0F32AddArray)) { ComputationBuilder builder(client_, TestName()); Computation callee = CreateR1S0F32AdditionComputation(); - auto x = builder.ConstantLiteral(*LiteralUtil::CreateR1<float>({})); - auto y = builder.ConstantLiteral(*LiteralUtil::CreateR1<float>({})); + auto x = builder.ConstantLiteral(*Literal::CreateR1<float>({})); + auto y = builder.ConstantLiteral(*Literal::CreateR1<float>({})); builder.Call(callee, {x, y}); ComputeAndCompareR1<float>(&builder, {}, {}, ErrorSpec(0.01f)); @@ -96,8 +96,8 @@ XLA_TEST_F(CallOpTest, DISABLED_ON_GPU(CallR1S0F32AddArray)) { XLA_TEST_F(CallOpTest, DISABLED_ON_GPU(CallR1S2F32AddArray)) { ComputationBuilder builder(client_, TestName()); Computation callee = CreateR1S2F32AdditionComputation(); - auto x = builder.ConstantLiteral(*LiteralUtil::CreateR1<float>({1.0f, 2.0f})); - auto y = builder.ConstantLiteral(*LiteralUtil::CreateR1<float>({2.0f, 3.0f})); + auto x = builder.ConstantLiteral(*Literal::CreateR1<float>({1.0f, 2.0f})); + auto y = builder.ConstantLiteral(*Literal::CreateR1<float>({2.0f, 3.0f})); builder.Call(callee, {x, y}); ComputeAndCompareR1<float>(&builder, {3.0f, 5.0f}, {}, ErrorSpec(0.01f)); @@ -106,8 +106,8 @@ XLA_TEST_F(CallOpTest, DISABLED_ON_GPU(CallR1S2F32AddArray)) { XLA_TEST_F(CallOpTest, DISABLED_ON_GPU(CallR0F32Tuple)) { ComputationBuilder builder(client_, TestName()); Computation callee = CreateR0F32TupleComputation(); - auto elem = 
LiteralUtil::CreateR0<float>(42.0); - auto tuple = LiteralUtil::MakeTuple({elem.get()}); + auto elem = Literal::CreateR0<float>(42.0); + auto tuple = Literal::MakeTuple({elem.get()}); builder.Call(callee, {builder.ConstantLiteral(*elem)}); ComputeAndCompareTuple(&builder, *tuple, {}, ErrorSpec(0.01f)); diff --git a/tensorflow/compiler/xla/tests/check_execution_arity_test.cc b/tensorflow/compiler/xla/tests/check_execution_arity_test.cc index b2433f03d3..2f4ad22f5b 100644 --- a/tensorflow/compiler/xla/tests/check_execution_arity_test.cc +++ b/tensorflow/compiler/xla/tests/check_execution_arity_test.cc @@ -37,7 +37,7 @@ class CheckExecutionArityTest : public ClientLibraryTestBase {}; TEST_F(CheckExecutionArityTest, TwoParamComputationNumArguments) { ComputationBuilder builder(client_, "add_two_params"); - auto param_literal = LiteralUtil::CreateR1<float>({1.1f, 2.2f}); + auto param_literal = Literal::CreateR1<float>({1.1f, 2.2f}); auto p0 = builder.Parameter(0, param_literal->shape(), "param0"); auto p1 = builder.Parameter(1, param_literal->shape(), "param1"); @@ -86,12 +86,12 @@ XLA_TEST_F(CheckExecutionArityTest, CheckArgumentShapes) { ASSERT_IS_OK(computation_status.status()); auto computation = computation_status.ConsumeValueOrDie(); - auto f32_literal = LiteralUtil::CreateR0<float>(1.1f); + auto f32_literal = Literal::CreateR0<float>(1.1f); auto f32_data = client_->TransferToServer(*f32_literal).ConsumeValueOrDie(); - auto f32_4_literal = LiteralUtil::CreateR1<float>({1.0f, 2.0f, 3.0f, 4.0f}); + auto f32_4_literal = Literal::CreateR1<float>({1.0f, 2.0f, 3.0f, 4.0f}); auto f32_4_data = client_->TransferToServer(*f32_4_literal).ConsumeValueOrDie(); - auto u8_4_literal = LiteralUtil::CreateR1U8("hola"); + auto u8_4_literal = Literal::CreateR1U8("hola"); auto u8_4_data = client_->TransferToServer(*u8_4_literal).ConsumeValueOrDie(); // Match diff --git a/tensorflow/compiler/xla/tests/client_library_test_base.cc b/tensorflow/compiler/xla/tests/client_library_test_base.cc index b96bb8f846..6094a31231 100644 --- a/tensorflow/compiler/xla/tests/client_library_test_base.cc +++ b/tensorflow/compiler/xla/tests/client_library_test_base.cc @@ -113,14 +113,14 @@ string ClientLibraryTestBase::ExecuteToString( if (!result.ok()) { return result.status().ToString(); } else { - return LiteralUtil::ToString(*result.ValueOrDie()); + return result.ValueOrDie()->ToString(); } } void ClientLibraryTestBase::ComputeAndCompareR1( ComputationBuilder* builder, const tensorflow::core::Bitmap& expected, tensorflow::gtl::ArraySlice<GlobalData*> arguments) { - std::unique_ptr<Literal> expected_literal = LiteralUtil::CreateR1(expected); + std::unique_ptr<Literal> expected_literal = Literal::CreateR1(expected); ClientLibraryTestBase::ComputeAndCompareLiteral(builder, *expected_literal, arguments); } @@ -179,10 +179,10 @@ void ClientLibraryTestBase::ComputeAndCompareR1U8( auto actual = actual_status.ConsumeValueOrDie(); // Turn the expected value into a literal. 
- std::unique_ptr<Literal> expected_literal = LiteralUtil::CreateR1U8(expected); + std::unique_ptr<Literal> expected_literal = Literal::CreateR1U8(expected); - VLOG(1) << "expected: " << LiteralUtil::ToString(*expected_literal); - VLOG(1) << "actual: " << LiteralUtil::ToString(*actual); + VLOG(1) << "expected: " << expected_literal->ToString(); + VLOG(1) << "actual: " << actual->ToString(); EXPECT_EQ(expected, actual->u8s_string()); } diff --git a/tensorflow/compiler/xla/tests/client_library_test_base.h b/tensorflow/compiler/xla/tests/client_library_test_base.h index f9e1082ebb..a5cb74bdfa 100644 --- a/tensorflow/compiler/xla/tests/client_library_test_base.h +++ b/tensorflow/compiler/xla/tests/client_library_test_base.h @@ -278,7 +278,7 @@ void ClientLibraryTestBase::ComputeAndCompareR0( ComputationBuilder* builder, NativeT expected, tensorflow::gtl::ArraySlice<GlobalData*> arguments) { std::unique_ptr<Literal> expected_literal = - LiteralUtil::CreateR0<NativeT>(expected); + Literal::CreateR0<NativeT>(expected); ClientLibraryTestBase::ComputeAndCompareLiteral(builder, *expected_literal, arguments); } @@ -291,7 +291,7 @@ void ClientLibraryTestBase::ComputeAndCompareR0( std::is_same<NativeT, double>::value, "Floating point type required when specifying an ErrorSpec"); std::unique_ptr<Literal> expected_literal = - LiteralUtil::CreateR0<NativeT>(expected); + Literal::CreateR0<NativeT>(expected); ClientLibraryTestBase::ComputeAndCompareLiteral(builder, *expected_literal, arguments, error); } @@ -301,7 +301,7 @@ void ClientLibraryTestBase::ComputeAndCompareR1( ComputationBuilder* builder, tensorflow::gtl::ArraySlice<NativeT> expected, tensorflow::gtl::ArraySlice<GlobalData*> arguments) { std::unique_ptr<Literal> expected_literal = - LiteralUtil::CreateR1<NativeT>(expected); + Literal::CreateR1<NativeT>(expected); ClientLibraryTestBase::ComputeAndCompareLiteral(builder, *expected_literal, arguments); } @@ -314,7 +314,7 @@ void ClientLibraryTestBase::ComputeAndCompareR1( std::is_same<NativeT, double>::value, "Floating point type required when specifying an ErrorSpec"); std::unique_ptr<Literal> expected_literal = - LiteralUtil::CreateR1<NativeT>(expected); + Literal::CreateR1<NativeT>(expected); ClientLibraryTestBase::ComputeAndCompareLiteral(builder, *expected_literal, arguments, error); } @@ -324,7 +324,7 @@ void ClientLibraryTestBase::ComputeAndCompareR2( ComputationBuilder* builder, const Array2D<NativeT>& expected, tensorflow::gtl::ArraySlice<GlobalData*> arguments) { std::unique_ptr<Literal> expected_literal = - LiteralUtil::CreateR2FromArray2D<NativeT>(expected); + Literal::CreateR2FromArray2D<NativeT>(expected); ClientLibraryTestBase::ComputeAndCompareLiteral(builder, *expected_literal, arguments); } @@ -337,7 +337,7 @@ void ClientLibraryTestBase::ComputeAndCompareR2( std::is_same<NativeT, double>::value, "Floating point type required when specifying an ErrorSpec"); std::unique_ptr<Literal> expected_literal = - LiteralUtil::CreateR2FromArray2D<NativeT>(expected); + Literal::CreateR2FromArray2D<NativeT>(expected); ClientLibraryTestBase::ComputeAndCompareLiteral(builder, *expected_literal, arguments, error); } @@ -347,7 +347,7 @@ void ClientLibraryTestBase::ComputeAndCompareR3( ComputationBuilder* builder, const Array3D<NativeT>& expected, tensorflow::gtl::ArraySlice<GlobalData*> arguments) { std::unique_ptr<Literal> expected_literal = - LiteralUtil::CreateR3FromArray3D<NativeT>(expected); + Literal::CreateR3FromArray3D<NativeT>(expected); 
ClientLibraryTestBase::ComputeAndCompareLiteral(builder, *expected_literal, arguments); } @@ -360,7 +360,7 @@ void ClientLibraryTestBase::ComputeAndCompareR3( std::is_same<NativeT, double>::value, "Floating point type required when specifying an ErrorSpec"); std::unique_ptr<Literal> expected_literal = - LiteralUtil::CreateR3FromArray3D<NativeT>(expected); + Literal::CreateR3FromArray3D<NativeT>(expected); ClientLibraryTestBase::ComputeAndCompareLiteral(builder, *expected_literal, arguments, error); } @@ -370,7 +370,7 @@ void ClientLibraryTestBase::ComputeAndCompareR4( ComputationBuilder* builder, const Array4D<NativeT>& expected, tensorflow::gtl::ArraySlice<GlobalData*> arguments) { std::unique_ptr<Literal> expected_literal = - LiteralUtil::CreateR4FromArray4D<NativeT>(expected); + Literal::CreateR4FromArray4D<NativeT>(expected); ClientLibraryTestBase::ComputeAndCompareLiteral(builder, *expected_literal, arguments); } @@ -383,7 +383,7 @@ void ClientLibraryTestBase::ComputeAndCompareR4( std::is_same<NativeT, double>::value, "Floating point type required when specifying an ErrorSpec"); std::unique_ptr<Literal> expected_literal = - LiteralUtil::CreateR4FromArray4D<NativeT>(expected); + Literal::CreateR4FromArray4D<NativeT>(expected); ClientLibraryTestBase::ComputeAndCompareLiteral(builder, *expected_literal, arguments, error); } @@ -392,7 +392,7 @@ template <typename NativeT> std::unique_ptr<GlobalData> ClientLibraryTestBase::CreateR0Parameter( NativeT value, int64 parameter_number, const string& name, ComputationBuilder* builder, ComputationDataHandle* data_handle) { - std::unique_ptr<Literal> literal = LiteralUtil::CreateR0(value); + std::unique_ptr<Literal> literal = Literal::CreateR0(value); std::unique_ptr<GlobalData> data = client_->TransferToServer(*literal).ConsumeValueOrDie(); *data_handle = builder->Parameter(parameter_number, literal->shape(), name); @@ -404,7 +404,7 @@ std::unique_ptr<GlobalData> ClientLibraryTestBase::CreateR1Parameter( tensorflow::gtl::ArraySlice<NativeT> values, int64 parameter_number, const string& name, ComputationBuilder* builder, ComputationDataHandle* data_handle) { - std::unique_ptr<Literal> literal = LiteralUtil::CreateR1(values); + std::unique_ptr<Literal> literal = Literal::CreateR1(values); std::unique_ptr<GlobalData> data = client_->TransferToServer(*literal).ConsumeValueOrDie(); *data_handle = builder->Parameter(parameter_number, literal->shape(), name); @@ -416,7 +416,7 @@ std::unique_ptr<GlobalData> ClientLibraryTestBase::CreateR2Parameter( const Array2D<NativeT>& array_2d, int64 parameter_number, const string& name, ComputationBuilder* builder, ComputationDataHandle* data_handle) { - std::unique_ptr<Literal> literal = LiteralUtil::CreateR2FromArray2D(array_2d); + std::unique_ptr<Literal> literal = Literal::CreateR2FromArray2D(array_2d); std::unique_ptr<GlobalData> data = client_->TransferToServer(*literal).ConsumeValueOrDie(); *data_handle = builder->Parameter(parameter_number, literal->shape(), name); @@ -428,7 +428,7 @@ std::unique_ptr<GlobalData> ClientLibraryTestBase::CreateR3Parameter( const Array3D<NativeT>& array_3d, int64 parameter_number, const string& name, ComputationBuilder* builder, ComputationDataHandle* data_handle) { - std::unique_ptr<Literal> literal = LiteralUtil::CreateR3FromArray3D(array_3d); + std::unique_ptr<Literal> literal = Literal::CreateR3FromArray3D(array_3d); std::unique_ptr<GlobalData> data = client_->TransferToServer(*literal).ConsumeValueOrDie(); *data_handle = builder->Parameter(parameter_number, literal->shape(), 
name); diff --git a/tensorflow/compiler/xla/tests/compilation_cache_test.cc b/tensorflow/compiler/xla/tests/compilation_cache_test.cc index 3439be46fd..7038afc5b1 100644 --- a/tensorflow/compiler/xla/tests/compilation_cache_test.cc +++ b/tensorflow/compiler/xla/tests/compilation_cache_test.cc @@ -50,7 +50,7 @@ class CompilationCacheTest : public ClientLibraryTestBase { /*execution_options=*/&execution_options_, &execution_profile) .ConsumeValueOrDie(); - LiteralTestUtil::ExpectNear(*LiteralUtil::CreateR0<float>(expected_result), + LiteralTestUtil::ExpectNear(*Literal::CreateR0<float>(expected_result), *result, error_spec_); EXPECT_EQ(expect_cache_hit, execution_profile.compilation_cache_hit()); } @@ -67,7 +67,7 @@ class CompilationCacheTest : public ClientLibraryTestBase { .ConsumeValueOrDie(); std::unique_ptr<Literal> result = client_->Transfer(*data_handle).ConsumeValueOrDie(); - LiteralTestUtil::ExpectNear(*LiteralUtil::CreateR2<float>(expected_result), + LiteralTestUtil::ExpectNear(*Literal::CreateR2<float>(expected_result), *result, error_spec_); EXPECT_EQ(expect_cache_hit, execution_profile.compilation_cache_hit()); } @@ -87,13 +87,13 @@ XLA_TEST_F(CompilationCacheTest, ComputationCalledMultipleTimes) { XLA_TEST_F(CompilationCacheTest, ComputationCalledWithDifferentParameters) { std::unique_ptr<GlobalData> data_42 = - client_->TransferToServer(*LiteralUtil::CreateR0<float>(42.0f)) + client_->TransferToServer(*Literal::CreateR0<float>(42.0f)) .ConsumeValueOrDie(); std::unique_ptr<GlobalData> data_123 = - client_->TransferToServer(*LiteralUtil::CreateR0<float>(123.0f)) + client_->TransferToServer(*Literal::CreateR0<float>(123.0f)) .ConsumeValueOrDie(); std::unique_ptr<GlobalData> data_456 = - client_->TransferToServer(*LiteralUtil::CreateR0<float>(456.0f)) + client_->TransferToServer(*Literal::CreateR0<float>(456.0f)) .ConsumeValueOrDie(); ComputationBuilder builder(client_, TestName()); diff --git a/tensorflow/compiler/xla/tests/compute_constant_test.cc b/tensorflow/compiler/xla/tests/compute_constant_test.cc index 264ee9e1ca..4384c9b314 100644 --- a/tensorflow/compiler/xla/tests/compute_constant_test.cc +++ b/tensorflow/compiler/xla/tests/compute_constant_test.cc @@ -85,7 +85,7 @@ class ComputeConstantTest : public ::testing::Test { ComputationBuilder* builder) { TF_ASSIGN_OR_RETURN(auto literal, ComputeConstantLiteral(client, operand, builder)); - return LiteralUtil::Get<Scalar>(*literal, {}); + return literal->Get<Scalar>({}); } bool IsConstant(const ComputationDataHandle& operand, @@ -210,7 +210,7 @@ TEST_F(ComputeConstantTest, NonScalarAdd) { auto computed = ComputeConstantLiteral(client, computation, &b); ASSERT_TRUE(computed.ok()) << computed.status(); std::unique_ptr<Literal> expected_literal = - LiteralUtil::CreateR1<int32>({4, 6}); + Literal::CreateR1<int32>({4, 6}); LiteralTestUtil::ExpectEqual(*expected_literal, *computed.ValueOrDie()); } } @@ -224,7 +224,7 @@ TEST_F(ComputeConstantTest, IntegerDivide) { auto computed = ComputeConstantLiteral(client, computation, &b); ASSERT_TRUE(computed.ok()) << computed.status(); - std::unique_ptr<Literal> expected_literal = LiteralUtil::CreateR0<int32>(5); + std::unique_ptr<Literal> expected_literal = Literal::CreateR0<int32>(5); LiteralTestUtil::ExpectEqual(*expected_literal, *computed.ValueOrDie()); } } diff --git a/tensorflow/compiler/xla/tests/concat_test.cc b/tensorflow/compiler/xla/tests/concat_test.cc index d47144ae4b..c5d88ad6a0 100644 --- a/tensorflow/compiler/xla/tests/concat_test.cc +++ 
b/tensorflow/compiler/xla/tests/concat_test.cc @@ -517,8 +517,8 @@ TEST_P(ConcatR2BinaryTest, DoIt) { // concat XLA_TEST_F(ConcatTest, ConcatOperandsOfSameOperand) { auto f32_scalar = ShapeUtil::MakeShape(xla::F32, {}); - auto x_literal = LiteralUtil::CreateR0<float>(2.f); - auto y_literal = LiteralUtil::CreateR0<float>(3.f); + auto x_literal = Literal::CreateR0<float>(2.f); + auto y_literal = Literal::CreateR0<float>(3.f); auto x_data = client_->TransferToServer(*x_literal).ConsumeValueOrDie(); auto y_data = client_->TransferToServer(*y_literal).ConsumeValueOrDie(); @@ -539,9 +539,9 @@ XLA_TEST_F(ConcatTest, ConcatOperandsOfSameOperand) { // produces the correct result in rank 1. XLA_TEST_F(ConcatTest, ConcatBroadcastArgument) { auto f32_scalar = ShapeUtil::MakeShape(xla::F32, {}); - auto x_literal = LiteralUtil::CreateR1<float>({2.0f, 3.0f, 5.0f, 6.0f}); - auto y_literal = LiteralUtil::CreateR0<float>(1.5f); - auto z_literal = LiteralUtil::CreateR0<float>(5.5f); + auto x_literal = Literal::CreateR1<float>({2.0f, 3.0f, 5.0f, 6.0f}); + auto y_literal = Literal::CreateR0<float>(1.5f); + auto z_literal = Literal::CreateR0<float>(5.5f); auto x_data = client_->TransferToServer(*x_literal).ConsumeValueOrDie(); auto y_data = client_->TransferToServer(*y_literal).ConsumeValueOrDie(); auto z_data = client_->TransferToServer(*z_literal).ConsumeValueOrDie(); @@ -567,9 +567,9 @@ XLA_TEST_F(ConcatTest, ConcatBroadcastArgument) { XLA_TEST_F(ConcatTest, ConcatBroadcastArgumentR3) { auto f32_scalar = ShapeUtil::MakeShape(xla::F32, {}); Array3D<float> x3d(3, 5, 7, 3.14f); - auto x_literal = LiteralUtil::CreateR3FromArray3D<float>(x3d); - auto y_literal = LiteralUtil::CreateR0<float>(1.5f); - auto z_literal = LiteralUtil::CreateR0<float>(5.5f); + auto x_literal = Literal::CreateR3FromArray3D<float>(x3d); + auto y_literal = Literal::CreateR0<float>(1.5f); + auto z_literal = Literal::CreateR0<float>(5.5f); auto x_data = client_->TransferToServer(*x_literal).ConsumeValueOrDie(); auto y_data = client_->TransferToServer(*y_literal).ConsumeValueOrDie(); auto z_data = client_->TransferToServer(*z_literal).ConsumeValueOrDie(); diff --git a/tensorflow/compiler/xla/tests/constants_test.cc b/tensorflow/compiler/xla/tests/constants_test.cc index 356d4a7161..7c276c8c8d 100644 --- a/tensorflow/compiler/xla/tests/constants_test.cc +++ b/tensorflow/compiler/xla/tests/constants_test.cc @@ -112,7 +112,7 @@ TEST_F(ConstantsTest, Small_2x2) { TEST_F(ConstantsTest, Empty_3x0x2) { ComputationBuilder builder(client_, TestName()); auto constant = builder.ConstantLiteral( - *LiteralUtil::CreateR3FromArray3D<float>(Array3D<float>(3, 0, 2))); + *Literal::CreateR3FromArray3D<float>(Array3D<float>(3, 0, 2))); ComputeAndCompareR3<float>(&builder, Array3D<float>(3, 0, 2), {}); } @@ -127,8 +127,8 @@ TEST_F(ConstantsTest, Small_2x2x2) { {{5.f, 6.f}, // y0 {7.f, 8.f}}, // y1 }); - auto constant = builder.ConstantLiteral( - *LiteralUtil::CreateR3FromArray3D<float>(array3d)); + auto constant = + builder.ConstantLiteral(*Literal::CreateR3FromArray3D<float>(array3d)); ComputeAndCompareR3<float>(&builder, array3d, {}); } @@ -142,7 +142,7 @@ TEST_F(ConstantsTest, Small_3x2x1x1) { {5.0f, 4.4f}, // p2 }); input_array.FillWithPZ(pz); - Literal input_literal = *LiteralUtil::CreateR4FromArray4D(input_array); + Literal input_literal = *Literal::CreateR4FromArray4D(input_array); { ComputationBuilder builder(client_, TestName()); @@ -160,9 +160,9 @@ TEST_F(ConstantsTest, Small_3x2x1x1) { // TODO(b/29263943): Support tuple constants. 
TEST_F(ConstantsTest, DISABLED_TupleConstant) { ComputationBuilder builder(client_, TestName()); - builder.ConstantLiteral(*LiteralUtil::MakeTuple( - {LiteralUtil::CreateR2<float>({{1.0}, {2.0}}).get(), - LiteralUtil::CreateR1<float>({2.0, 42}).get()})); + builder.ConstantLiteral( + *Literal::MakeTuple({Literal::CreateR2<float>({{1.0}, {2.0}}).get(), + Literal::CreateR1<float>({2.0, 42}).get()})); std::unique_ptr<Literal> result = ExecuteAndTransferOrDie(&builder, {}); diff --git a/tensorflow/compiler/xla/tests/convolution_dimension_numbers_test.cc b/tensorflow/compiler/xla/tests/convolution_dimension_numbers_test.cc index 34c8ef4cee..fb50d9b0eb 100644 --- a/tensorflow/compiler/xla/tests/convolution_dimension_numbers_test.cc +++ b/tensorflow/compiler/xla/tests/convolution_dimension_numbers_test.cc @@ -62,8 +62,7 @@ XLA_TEST_F(ConvolutionDimensionNumbersTest, auto weight_array = MakeUnique<Array4D<float>>(4, 3, 1, 1); weight_array->FillWithMultiples(0.2); auto weight_data = - client_ - ->TransferToServer(*LiteralUtil::CreateR4FromArray4D(*weight_array)) + client_->TransferToServer(*Literal::CreateR4FromArray4D(*weight_array)) .ConsumeValueOrDie(); ComputationBuilder builder(client_, TestName()); diff --git a/tensorflow/compiler/xla/tests/convolution_test.cc b/tensorflow/compiler/xla/tests/convolution_test.cc index 8a6c5adb8a..a110082f9a 100644 --- a/tensorflow/compiler/xla/tests/convolution_test.cc +++ b/tensorflow/compiler/xla/tests/convolution_test.cc @@ -114,10 +114,10 @@ TEST_F(ConvolutionTest, Convolve_1x1x1x2_1x1x1x2_Valid) { ReferenceUtil::ConvArray4D(input, filter, {1, 1}, Padding::kValid); auto input_literal = - client_->TransferToServer(*LiteralUtil::CreateR4FromArray4D(input)) + client_->TransferToServer(*Literal::CreateR4FromArray4D(input)) .ConsumeValueOrDie(); auto filter_literal = - client_->TransferToServer(*LiteralUtil::CreateR4FromArray4D(filter)) + client_->TransferToServer(*Literal::CreateR4FromArray4D(filter)) .ConsumeValueOrDie(); ComputeAndCompareR4<float>(&builder, *aexpected, @@ -157,10 +157,10 @@ TEST_F(ConvolutionTest, Convolve_1x1x4x4_1x1x2x2_Valid) { ReferenceUtil::ConvArray4D(input, filter, {1, 1}, Padding::kValid); auto input_literal = - client_->TransferToServer(*LiteralUtil::CreateR4FromArray4D(input)) + client_->TransferToServer(*Literal::CreateR4FromArray4D(input)) .ConsumeValueOrDie(); auto filter_literal = - client_->TransferToServer(*LiteralUtil::CreateR4FromArray4D(filter)) + client_->TransferToServer(*Literal::CreateR4FromArray4D(filter)) .ConsumeValueOrDie(); ComputeAndCompareR4<float>(&builder, *aexpected, @@ -200,10 +200,10 @@ TEST_F(ConvolutionTest, Convolve_1x1x4x4_1x1x2x2_Same) { ReferenceUtil::ConvArray4D(input, filter, {1, 1}, Padding::kSame); auto input_literal = - client_->TransferToServer(*LiteralUtil::CreateR4FromArray4D(input)) + client_->TransferToServer(*Literal::CreateR4FromArray4D(input)) .ConsumeValueOrDie(); auto filter_literal = - client_->TransferToServer(*LiteralUtil::CreateR4FromArray4D(filter)) + client_->TransferToServer(*Literal::CreateR4FromArray4D(filter)) .ConsumeValueOrDie(); ComputeAndCompareR4<float>(&builder, *aexpected, @@ -245,10 +245,10 @@ TEST_F(ConvolutionTest, Convolve_1x1x4x4_1x1x3x3_Same) { ReferenceUtil::ConvArray4D(input, filter, {1, 1}, Padding::kSame); auto input_literal = - client_->TransferToServer(*LiteralUtil::CreateR4FromArray4D(input)) + client_->TransferToServer(*Literal::CreateR4FromArray4D(input)) .ConsumeValueOrDie(); auto filter_literal = - 
client_->TransferToServer(*LiteralUtil::CreateR4FromArray4D(filter)) + client_->TransferToServer(*Literal::CreateR4FromArray4D(filter)) .ConsumeValueOrDie(); ComputeAndCompareR4<float>(&builder, *aexpected, @@ -272,10 +272,10 @@ XLA_TEST_F(ConvolutionTest, Convolve1D_1x2x5_1x2x2_Valid) { Array3D<float> expected({{{510, 610, 710, 810}}}); auto input_literal = - client_->TransferToServer(*LiteralUtil::CreateR3FromArray3D(input)) + client_->TransferToServer(*Literal::CreateR3FromArray3D(input)) .ConsumeValueOrDie(); auto filter_literal = - client_->TransferToServer(*LiteralUtil::CreateR3FromArray3D(filter)) + client_->TransferToServer(*Literal::CreateR3FromArray3D(filter)) .ConsumeValueOrDie(); ComputeAndCompareR3<float>(&builder, expected, @@ -312,21 +312,18 @@ XLA_TEST_F(ConvolutionTest, Convolve3D_1x4x2x3x3_2x2x2x3x3_Valid) { std::vector<float> input_elems(ShapeUtil::ElementsIn(input_shape)); std::iota(input_elems.begin(), input_elems.end(), 1.0f); - auto input_r1 = LiteralUtil::CreateR1<float>(input_elems); - auto input_r5 = - LiteralUtil::Reshape(*input_r1, input_dims).ConsumeValueOrDie(); + auto input_r1 = Literal::CreateR1<float>(input_elems); + auto input_r5 = input_r1->Reshape(input_dims).ConsumeValueOrDie(); std::vector<float> filter_elems(ShapeUtil::ElementsIn(filter_shape)); std::iota(filter_elems.begin(), filter_elems.end(), 1.0f); - auto filter_r1 = LiteralUtil::CreateR1<float>(filter_elems); - auto filter_r5 = - LiteralUtil::Reshape(*filter_r1, filter_dims).ConsumeValueOrDie(); + auto filter_r1 = Literal::CreateR1<float>(filter_elems); + auto filter_r5 = filter_r1->Reshape(filter_dims).ConsumeValueOrDie(); - auto expected_r1 = LiteralUtil::CreateR1<float>( + auto expected_r1 = Literal::CreateR1<float>( {19554, 19962, 20370, 22110, 22590, 23070, 34890, 35730, 36570, 37446, 38358, 39270, 50226, 51498, 52770, 52782, 54126, 55470}); - auto expected_r5 = - LiteralUtil::Reshape(*expected_r1, {1, 3, 1, 2, 3}).ConsumeValueOrDie(); + auto expected_r5 = expected_r1->Reshape({1, 3, 1, 2, 3}).ConsumeValueOrDie(); auto input_literal = client_->TransferToServer(*input_r5).ConsumeValueOrDie(); auto filter_literal = diff --git a/tensorflow/compiler/xla/tests/convolution_variants_test.cc b/tensorflow/compiler/xla/tests/convolution_variants_test.cc index dfaa023e33..c8e74aa01a 100644 --- a/tensorflow/compiler/xla/tests/convolution_variants_test.cc +++ b/tensorflow/compiler/xla/tests/convolution_variants_test.cc @@ -1311,20 +1311,19 @@ TEST_F(ConvolutionVariantsTest, BackwardFilterEvenPadding1D) { TEST_F(ConvolutionVariantsTest, BackwardInputEvenPadding3D) { ComputationBuilder builder(client_, TestName()); - auto gradients_flat = LiteralUtil::CreateR1<float>({1}); + auto gradients_flat = Literal::CreateR1<float>({1}); auto gradients_literal = - LiteralUtil::Reshape(*gradients_flat, {1, 1, 1, 1, 1}) - .ConsumeValueOrDie(); + gradients_flat->Reshape({1, 1, 1, 1, 1}).ConsumeValueOrDie(); auto gradients = builder.ConstantLiteral(*gradients_literal); - auto weights_flat = LiteralUtil::CreateR1<float>({1, 10, 100}); + auto weights_flat = Literal::CreateR1<float>({1, 10, 100}); auto weights_literal = - LiteralUtil::Reshape(*weights_flat, {1, 1, 1, 1, 3}).ConsumeValueOrDie(); + weights_flat->Reshape({1, 1, 1, 1, 3}).ConsumeValueOrDie(); auto weights = builder.ConstantLiteral(*weights_literal); - auto expected_flat = LiteralUtil::CreateR1<float>({10}); + auto expected_flat = Literal::CreateR1<float>({10}); auto expected_literal = - LiteralUtil::Reshape(*expected_flat, {1, 1, 1, 1, 1}).ConsumeValueOrDie(); 
+ expected_flat->Reshape({1, 1, 1, 1, 1}).ConsumeValueOrDie(); auto mirrored_weights = builder.Rev(weights, {2, 3, 4}); builder.ConvWithGeneralPadding(gradients, mirrored_weights, @@ -1336,21 +1335,19 @@ TEST_F(ConvolutionVariantsTest, BackwardInputEvenPadding3D) { TEST_F(ConvolutionVariantsTest, BackwardFilterEvenPadding3D) { ComputationBuilder builder(client_, TestName()); - auto activations_flat = LiteralUtil::CreateR1<float>({1, 2, 3, 4}); + auto activations_flat = Literal::CreateR1<float>({1, 2, 3, 4}); auto activations_literal = - LiteralUtil::Reshape(*activations_flat, {1, 1, 1, 1, 4}) - .ConsumeValueOrDie(); + activations_flat->Reshape({1, 1, 1, 1, 4}).ConsumeValueOrDie(); auto activations = builder.ConstantLiteral(*activations_literal); - auto gradients_flat = LiteralUtil::CreateR1<float>({100, 10, 1}); + auto gradients_flat = Literal::CreateR1<float>({100, 10, 1}); auto gradients_literal = - LiteralUtil::Reshape(*gradients_flat, {1, 1, 1, 1, 3}) - .ConsumeValueOrDie(); + gradients_flat->Reshape({1, 1, 1, 1, 3}).ConsumeValueOrDie(); auto gradients = builder.ConstantLiteral(*gradients_literal); - auto expected_flat = LiteralUtil::CreateR1<float>({13, 24, 130}); + auto expected_flat = Literal::CreateR1<float>({13, 24, 130}); auto expected_literal = - LiteralUtil::Reshape(*expected_flat, {1, 1, 1, 1, 3}).ConsumeValueOrDie(); + expected_flat->Reshape({1, 1, 1, 1, 3}).ConsumeValueOrDie(); auto forward_conv = builder.ConvGeneralDilated( activations, gradients, diff --git a/tensorflow/compiler/xla/tests/copy_test.cc b/tensorflow/compiler/xla/tests/copy_test.cc index 47bcc96c19..76ae280f1a 100644 --- a/tensorflow/compiler/xla/tests/copy_test.cc +++ b/tensorflow/compiler/xla/tests/copy_test.cc @@ -57,39 +57,34 @@ class CopyOpTest : public HloTestBase { tensorflow::gtl::ArraySlice<int64> permutation); }; -TEST_F(CopyOpTest, CopyR0Bool) { - TestCopyOp(*LiteralUtil::CreateR0<bool>(true)); -} +TEST_F(CopyOpTest, CopyR0Bool) { TestCopyOp(*Literal::CreateR0<bool>(true)); } -TEST_F(CopyOpTest, CopyR1S0U32) { - TestCopyOp(*LiteralUtil::CreateR1<uint32>({})); -} +TEST_F(CopyOpTest, CopyR1S0U32) { TestCopyOp(*Literal::CreateR1<uint32>({})); } TEST_F(CopyOpTest, CopyR1S3U32) { - TestCopyOp(*LiteralUtil::CreateR1<uint32>({1, 2, 3})); + TestCopyOp(*Literal::CreateR1<uint32>({1, 2, 3})); } TEST_F(CopyOpTest, CopyR3F32_2x2x3) { - TestCopyOp( - *LiteralUtil::CreateR3({{{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}}, - {{1.1f, 2.1f, 3.1f}, {6.1f, 3.5f, 2.8f}}})); + TestCopyOp(*Literal::CreateR3({{{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}}, + {{1.1f, 2.1f, 3.1f}, {6.1f, 3.5f, 2.8f}}})); } TEST_F(CopyOpTest, CopyR4S32_2x2x3x2) { - TestCopyOp(*LiteralUtil::CreateR4( + TestCopyOp(*Literal::CreateR4( {{{{1, -2}, {-4, 5}, {6, 7}}, {{8, 9}, {10, 11}, {12, 13}}}, {{{10, 3}, {7, -2}, {3, 6}}, {{2, 5}, {-11, 5}, {-2, -5}}}})); } TEST_F(CopyOpTest, CopyR4S32_0x2x3x2) { - TestCopyOp(*LiteralUtil::CreateR4FromArray4D(Array4D<int32>(0, 2, 3, 2))); + TestCopyOp(*Literal::CreateR4FromArray4D(Array4D<int32>(0, 2, 3, 2))); } TEST_F(CopyOpTest, CopyParameterScalar) { auto builder = HloComputation::Builder(TestName()); // Copy literal to device to use as parameter. 
- auto literal = LiteralUtil::CreateR0<float>(42.0); + auto literal = Literal::CreateR0<float>(42.0); Shape shape = literal->shape(); auto constant_device_base = TransferToDevice(*literal); @@ -111,7 +106,7 @@ TEST_F(CopyOpTest, CopyParameterScalar) { TEST_F(CopyOpTest, CopyConstantR2Twice) { auto builder = HloComputation::Builder(TestName()); - auto literal = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}); + auto literal = Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}); auto constant = builder.AddInstruction( HloInstruction::CreateConstant(std::move(literal))); @@ -133,7 +128,7 @@ TEST_F(CopyOpTest, CopyConstantR2DifferentLayouts) { HloComputation::Builder builder(TestName()); std::unique_ptr<Literal> literal = - LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}); + Literal::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}); // Reverse the minor-to-major order of the literal. Layout* literal_layout = literal->mutable_shape()->mutable_layout(); ASSERT_EQ(2, literal_layout->minor_to_major_size()); @@ -169,7 +164,7 @@ void CopyOpTest::TestCopyConstantLayout021(size_t n1, size_t n2, size_t n3) { HloComputation::Builder builder(TestName()); - std::unique_ptr<Literal> literal = LiteralUtil::CreateR3FromArray3D(a); + std::unique_ptr<Literal> literal = Literal::CreateR3FromArray3D(a); HloInstruction* constant = builder.AddInstruction( HloInstruction::CreateConstant(std::move(literal))); @@ -203,7 +198,7 @@ void CopyOpTest::TestCopyConstantLayoutR4( HloComputation::Builder builder(TestName()); - std::unique_ptr<Literal> literal = LiteralUtil::CreateR4FromArray4D(a); + std::unique_ptr<Literal> literal = Literal::CreateR4FromArray4D(a); HloInstruction* constant = builder.AddInstruction( HloInstruction::CreateConstant(std::move(literal))); @@ -246,7 +241,7 @@ using CopyOpClientTest = ClientLibraryTestBase; XLA_TEST_F(CopyOpClientTest, Copy0x0) { Shape in_shape = ShapeUtil::MakeShapeWithLayout(F32, {0, 0}, {0, 1}); Shape out_shape = ShapeUtil::MakeShapeWithLayout(F32, {0, 0}, {1, 0}); - auto empty = LiteralUtil::CreateFromShape(in_shape); + auto empty = Literal::CreateFromShape(in_shape); ComputationBuilder builder(client_, TestName()); auto param0 = builder.Parameter(0, in_shape, "input"); diff --git a/tensorflow/compiler/xla/tests/custom_call_test.cc b/tensorflow/compiler/xla/tests/custom_call_test.cc index 903c01f1c6..73772fdec0 100644 --- a/tensorflow/compiler/xla/tests/custom_call_test.cc +++ b/tensorflow/compiler/xla/tests/custom_call_test.cc @@ -67,7 +67,7 @@ XLA_TEST_F(CustomCallTest, DISABLED_ON_GPU(CustomCallR0F32Add2)) { auto builder = HloComputation::Builder(TestName()); auto constant = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f))); + HloInstruction::CreateConstant(Literal::CreateR0<float>(42.0f))); builder.AddInstruction( HloInstruction::CreateCustomCall(r0f32_, {constant}, "R0F32Add2")); @@ -88,7 +88,7 @@ XLA_TEST_F(CustomCallTest, DISABLED_ON_GPU(CustomCallR2F32Reduce)) { array(1, 1) = 4.0f; auto constant = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR2FromArray2D(array))); + HloInstruction::CreateConstant(Literal::CreateR2FromArray2D(array))); builder.AddInstruction( HloInstruction::CreateCustomCall(r0f32_, {constant}, "R2F32ReduceSum")); @@ -104,7 +104,7 @@ XLA_TEST_F(CustomCallTest, auto b = HloComputation::Builder(TestName()); auto input = b.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR2FromArray2D( + HloInstruction::CreateConstant(Literal::CreateR2FromArray2D( 
Array2D<float>{{1.0f, 2.0f}, {3.0f, 4.0f}}))); auto incremented = b.AddInstruction(HloInstruction::CreateCustomCall( ShapeUtil::MakeShape(F32, {1, 2, 2}), {input}, "Add1ToValues")); diff --git a/tensorflow/compiler/xla/tests/deconstruct_tuple_test.cc b/tensorflow/compiler/xla/tests/deconstruct_tuple_test.cc index 23f9c8e880..3d6a995a24 100644 --- a/tensorflow/compiler/xla/tests/deconstruct_tuple_test.cc +++ b/tensorflow/compiler/xla/tests/deconstruct_tuple_test.cc @@ -173,7 +173,7 @@ TEST_F(DeconstructTupleTest, DeconstructNonTuple) { XLA_TEST_F(DeconstructTupleTest, DeconstructTupleFromParam) { ComputationBuilder builder(client_, TestName()); std::unique_ptr<Literal> param0_literal = - LiteralUtil::CreateR1<float>({3.14f, -100.25f}); + Literal::CreateR1<float>({3.14f, -100.25f}); std::unique_ptr<GlobalData> param0_data = client_->TransferToServer(*param0_literal).ConsumeValueOrDie(); auto p = builder.Parameter(0, ShapeUtil::MakeShape(F32, {2}), "param0"); diff --git a/tensorflow/compiler/xla/tests/dot_operation_test.cc b/tensorflow/compiler/xla/tests/dot_operation_test.cc index 1581e15d7b..b06b5c5f47 100644 --- a/tensorflow/compiler/xla/tests/dot_operation_test.cc +++ b/tensorflow/compiler/xla/tests/dot_operation_test.cc @@ -185,14 +185,14 @@ void DotOperationTest::TestMatrixDot(int M, int K, int N, bool lhs_row_major, bool rhs_row_major) { std::unique_ptr<Array2D<float>> lhs_data = MakeLinspaceArray2D(0.0, 1.0, M, K); - std::unique_ptr<Literal> lhs_lit = LiteralUtil::CreateR2FromArray2DWithLayout( + std::unique_ptr<Literal> lhs_lit = Literal::CreateR2FromArray2DWithLayout( *lhs_data, LayoutUtil::MakeLayout(MinorToMajorForIsRowMajor(lhs_row_major))); auto lhs_handle = client_->TransferToServer(*lhs_lit).ConsumeValueOrDie(); std::unique_ptr<Array2D<float>> rhs_data = MakeLinspaceArray2D(0.0, 1.0, K, N); - std::unique_ptr<Literal> rhs_lit = LiteralUtil::CreateR2FromArray2DWithLayout( + std::unique_ptr<Literal> rhs_lit = Literal::CreateR2FromArray2DWithLayout( *rhs_data, LayoutUtil::MakeLayout(MinorToMajorForIsRowMajor(rhs_row_major))); auto rhs_handle = client_->TransferToServer(*rhs_lit).ConsumeValueOrDie(); @@ -379,12 +379,12 @@ XLA_TEST_F(DotOperationTest, BatchMatMul) { builder.Reshape(out_flat, {0, 1, 2}, {2, 2, 2, 2}); auto x_data = client_ - ->TransferToServer(*LiteralUtil::CreateR4<float>( + ->TransferToServer(*Literal::CreateR4<float>( {{{{1000, 100}, {10, 1}}, {{2000, 200}, {20, 2}}}, {{{3000, 300}, {30, 3}}, {{4000, 400}, {40, 4}}}})) .ConsumeValueOrDie(); auto y_data = client_ - ->TransferToServer(*LiteralUtil::CreateR4<float>( + ->TransferToServer(*Literal::CreateR4<float>( {{{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}, {{{11, 22}, {33, 44}}, {{55, 66}, {77, 88}}}})) .ConsumeValueOrDie(); @@ -415,14 +415,14 @@ TEST_F(DotOperationTest, TransposeFolding) { auto lhs_handle = client_ ->TransferToServer( - *LiteralUtil::CreateR2FromArray2DWithLayout<float>( + *Literal::CreateR2FromArray2DWithLayout<float>( *lhs, LayoutUtil::MakeLayout( MinorToMajorForIsRowMajor(row_major)))) .ConsumeValueOrDie(); auto rhs_handle = client_ ->TransferToServer( - *LiteralUtil::CreateR2FromArray2DWithLayout<float>( + *Literal::CreateR2FromArray2DWithLayout<float>( *rhs, LayoutUtil::MakeLayout( MinorToMajorForIsRowMajor(row_major)))) .ConsumeValueOrDie(); diff --git a/tensorflow/compiler/xla/tests/dynamic_ops_test.cc b/tensorflow/compiler/xla/tests/dynamic_ops_test.cc index d86998b708..f653766f39 100644 --- a/tensorflow/compiler/xla/tests/dynamic_ops_test.cc +++ 
b/tensorflow/compiler/xla/tests/dynamic_ops_test.cc @@ -388,8 +388,8 @@ class DynamicUpdateSliceTest : public ClientLibraryTestBase { template <typename NativeT> void DumpArray(const string& name, const Array3D<NativeT> values) { std::unique_ptr<Literal> literal = - LiteralUtil::CreateR3FromArray3D<NativeT>(values); - LOG(INFO) << name << ":" << LiteralUtil::ToString(*literal); + Literal::CreateR3FromArray3D<NativeT>(values); + LOG(INFO) << name << ":" << literal->ToString(); } }; @@ -469,7 +469,7 @@ void BM_DynamicSlice(int num_iters) { ComputationBuilder builder(client, "DynamicSlice"); // Create input as a constant: shape [1, 2, 3, 4] - auto input_literal = LiteralUtil::CreateR4( + auto input_literal = Literal::CreateR4( {{{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}}, {{13, 14, 15, 16}, {17, 18, 19, 20}, {21, 22, 23, 24}}}}); auto input = builder.ConstantLiteral(*input_literal); @@ -487,7 +487,7 @@ void BM_DynamicSlice(int num_iters) { &allocator, 0) .ConsumeValueOrDie(); - auto start_indices_literal = LiteralUtil::CreateR1<int32>({0, 1, 2, 3}); + auto start_indices_literal = Literal::CreateR1<int32>({0, 1, 2, 3}); ASSERT_IS_OK(transfer_manager->TransferLiteralToDevice( executors[device_ordinal], *start_indices_literal, buffer->mutable_buffer({}))); diff --git a/tensorflow/compiler/xla/tests/fusion_test.cc b/tensorflow/compiler/xla/tests/fusion_test.cc index 0e4fc33579..3546e9a1e8 100644 --- a/tensorflow/compiler/xla/tests/fusion_test.cc +++ b/tensorflow/compiler/xla/tests/fusion_test.cc @@ -80,7 +80,7 @@ class FusionTest : public HloTestBase { HloInstruction* hlos[4]; for (int i = 0; i < Arity; ++i) { hlos[i + 1] = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR2FromArray2D(operand_data[i]))); + Literal::CreateR2FromArray2D(operand_data[i]))); } auto answer_shape = ShapeUtil::MakeShape(prim_type, {test_width, test_height}); @@ -106,7 +106,7 @@ class FusionTest : public HloTestBase { ArraySlice<HloInstruction*>(hlos, 0, Arity + 1), HloInstruction::FusionKind::kLoop); - auto expected = LiteralUtil::CreateR2FromArray2D(answer_data); + auto expected = Literal::CreateR2FromArray2D(answer_data); auto actual = ExecuteAndTransfer(std::move(hlo_module), {}); if (primitive_util::IsFloatingPointType(prim_type)) { LiteralTestUtil::ExpectNear(*expected, *actual, ErrorSpec(1e-4)); @@ -177,28 +177,27 @@ XLA_TEST_F(FusionTest, Test) { auto builder = HloComputation::Builder(TestName()); auto hlo_module = CreateNewModule(); auto const0 = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR2<float>({{1.0}, {2.0}, {3.0}}))); + Literal::CreateR2<float>({{1.0}, {2.0}, {3.0}}))); auto const1 = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR2<float>({{-1.0}, {-1.0}, {-1.0}}))); + Literal::CreateR2<float>({{-1.0}, {-1.0}, {-1.0}}))); auto add2 = builder.AddInstruction(HloInstruction::CreateBinary( ShapeUtil::MakeShape(F32, {3, 1}), HloOpcode::kAdd, const0, const1)); auto reshape3 = builder.AddInstruction(HloInstruction::CreateTranspose( ShapeUtil::MakeShape(F32, {1, 3}), add2, {1, 0})); auto const4 = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR2<float>({{1.62, 2.72, 3.14}}))); + Literal::CreateR2<float>({{1.62, 2.72, 3.14}}))); auto concat5 = builder.AddInstruction(HloInstruction::CreateConcatenate( ShapeUtil::MakeShape(F32, {2, 3}), {reshape3, const4}, 0)); auto const6 = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR2<float>({{1.0, 1.0, 1.0}, {0.0, 0.0, 
0.0}}))); + Literal::CreateR2<float>({{1.0, 1.0, 1.0}, {0.0, 0.0, 0.0}}))); auto negate7 = builder.AddInstruction(HloInstruction::CreateUnary( ShapeUtil::MakeShape(F32, {2, 3}), HloOpcode::kNegate, const6)); auto add8 = builder.AddInstruction(HloInstruction::CreateBinary( ShapeUtil::MakeShape(F32, {2, 3}), HloOpcode::kAdd, concat5, negate7)); auto const9 = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR2<float>({{0.5, 0.5, 0.5}, {0.5, 0.5, 0.5}}))); - auto const10 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR2<bool>( - {{true, false, true}, {false, true, false}}))); + Literal::CreateR2<float>({{0.5, 0.5, 0.5}, {0.5, 0.5, 0.5}}))); + auto const10 = builder.AddInstruction(HloInstruction::CreateConstant( + Literal::CreateR2<bool>({{true, false, true}, {false, true, false}}))); auto select11 = builder.AddInstruction( HloInstruction::CreateTernary(ShapeUtil::MakeShape(F32, {2, 3}), HloOpcode::kSelect, const10, add8, const9)); @@ -213,7 +212,7 @@ XLA_TEST_F(FusionTest, Test) { const4, reshape3, add2, const1, const0}, HloInstruction::FusionKind::kLoop); - LiteralTestUtil::ExpectNear(*LiteralUtil::CreateR2<float>({{0.5}, {2.72}}), + LiteralTestUtil::ExpectNear(*Literal::CreateR2<float>({{0.5}, {2.72}}), *ExecuteAndTransfer(std::move(hlo_module), {}), ErrorSpec(1e-4)); } @@ -225,11 +224,11 @@ XLA_TEST_F(FusionTest, Parameter) { auto builder = HloComputation::Builder(TestName()); auto hlo_module = CreateNewModule(); auto const0 = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR2<float>({{1.0, 2.0, 3.0}}))); + Literal::CreateR2<float>({{1.0, 2.0, 3.0}}))); auto copy1 = builder.AddInstruction(HloInstruction::CreateUnary( ShapeUtil::MakeShape(F32, {1, 3}), HloOpcode::kCopy, const0)); auto const2 = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR2<float>({{-2.0, -2.0, -2.0}}))); + Literal::CreateR2<float>({{-2.0, -2.0, -2.0}}))); // add3 = copy1 + const2 = const0 + const2 = {1,2,3} + {-2,-2,-2} = {-1,0,+1} auto add3 = builder.AddInstruction(HloInstruction::CreateBinary( ShapeUtil::MakeShape(F32, {1, 3}), HloOpcode::kAdd, copy1, const2)); @@ -239,7 +238,7 @@ XLA_TEST_F(FusionTest, Parameter) { ->CreateFusionInstruction(/*instructions_to_fuse=*/{add3, const2}, HloInstruction::FusionKind::kLoop); - LiteralTestUtil::ExpectNear(*LiteralUtil::CreateR2<float>({{-1.0, 0.0, 1.0}}), + LiteralTestUtil::ExpectNear(*Literal::CreateR2<float>({{-1.0, 0.0, 1.0}}), *ExecuteAndTransfer(std::move(hlo_module), {}), ErrorSpec(1e-4)); } @@ -248,9 +247,9 @@ XLA_TEST_F(FusionTest, BroadcastIntoBinaryOp) { auto builder = HloComputation::Builder(TestName()); auto hlo_module = CreateNewModule(); auto const_vector = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0}))); + Literal::CreateR1<float>({1.0, 2.0, 3.0}))); auto const_array = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR2<float>({{-1.0, -2.0, -4.0}, {10.0, 20.0, 30.0}}))); + Literal::CreateR2<float>({{-1.0, -2.0, -4.0}, {10.0, 20.0, 30.0}}))); auto broadcast = builder.AddInstruction( HloInstruction::CreateBroadcast(const_array->shape(), const_vector, {1})); // add2 = broadcast(const_vector) + const_array @@ -264,7 +263,7 @@ XLA_TEST_F(FusionTest, BroadcastIntoBinaryOp) { HloInstruction::FusionKind::kLoop); LiteralTestUtil::ExpectNear( - *LiteralUtil::CreateR2<float>({{0.0, 0.0, -1.0}, {11.0, 22.0, 33.0}}), + *Literal::CreateR2<float>({{0.0, 0.0, -1.0}, {11.0, 22.0, 
33.0}}), *ExecuteAndTransfer(std::move(hlo_module), {}), ErrorSpec(1e-4)); } @@ -272,13 +271,13 @@ XLA_TEST_F(FusionTest, ReshapeToScalar) { auto builder = HloComputation::Builder(TestName()); auto hlo_module = CreateNewModule(); auto single_element_array = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR2<int32>({{5}}))); + HloInstruction::CreateConstant(Literal::CreateR2<int32>({{5}}))); auto reshape = builder.AddInstruction(HloInstruction::CreateReshape( ShapeUtil::MakeShape(S32, {}), single_element_array)); hlo_module->AddEntryComputation(builder.Build()) ->CreateFusionInstruction(/*instructions_to_fuse=*/{reshape}, HloInstruction::FusionKind::kLoop); - LiteralTestUtil::ExpectEqual(*LiteralUtil::CreateR0<int32>(5), + LiteralTestUtil::ExpectEqual(*Literal::CreateR0<int32>(5), *ExecuteAndTransfer(std::move(hlo_module), {})); } @@ -286,14 +285,14 @@ XLA_TEST_F(FusionTest, Reshape_3by2_1by2by3) { auto builder = HloComputation::Builder(TestName()); auto hlo_module = CreateNewModule(); auto const0 = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR2<int32>({{1, 2}, {3, 4}, {5, 6}}))); + Literal::CreateR2<int32>({{1, 2}, {3, 4}, {5, 6}}))); auto reshape1 = builder.AddInstruction(HloInstruction::CreateReshape( ShapeUtil::MakeShape(S32, {1, 2, 3}), const0)); hlo_module->AddEntryComputation(builder.Build()) ->CreateFusionInstruction(/*instructions_to_fuse=*/{reshape1}, HloInstruction::FusionKind::kLoop); LiteralTestUtil::ExpectEqual( - *LiteralUtil::CreateR3<int32>({{{1, 2, 3}, {4, 5, 6}}}), + *Literal::CreateR3<int32>({{{1, 2, 3}, {4, 5, 6}}}), *ExecuteAndTransfer(std::move(hlo_module), {})); } @@ -301,14 +300,14 @@ XLA_TEST_F(FusionTest, Reshape_1by2by3_3by2) { auto builder = HloComputation::Builder(TestName()); auto hlo_module = CreateNewModule(); auto const0 = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR3<int32>({{{1, 2, 3}, {4, 5, 6}}}))); + Literal::CreateR3<int32>({{{1, 2, 3}, {4, 5, 6}}}))); auto reshape1 = builder.AddInstruction( HloInstruction::CreateReshape(ShapeUtil::MakeShape(S32, {3, 2}), const0)); hlo_module->AddEntryComputation(builder.Build()) ->CreateFusionInstruction(/*instructions_to_fuse=*/{reshape1}, HloInstruction::FusionKind::kLoop); LiteralTestUtil::ExpectEqual( - *LiteralUtil::CreateR2<int32>({{1, 2}, {3, 4}, {5, 6}}), + *Literal::CreateR2<int32>({{1, 2}, {3, 4}, {5, 6}}), *ExecuteAndTransfer(std::move(hlo_module), {})); } @@ -316,13 +315,13 @@ XLA_TEST_F(FusionTest, Reshape_1by1by1_) { auto builder = HloComputation::Builder(TestName()); auto hlo_module = CreateNewModule(); auto const0 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR3<int32>({{{7}}}))); + HloInstruction::CreateConstant(Literal::CreateR3<int32>({{{7}}}))); auto reshape1 = builder.AddInstruction( HloInstruction::CreateReshape(ShapeUtil::MakeShape(S32, {}), const0)); hlo_module->AddEntryComputation(builder.Build()) ->CreateFusionInstruction(/*instructions_to_fuse=*/{reshape1}, HloInstruction::FusionKind::kLoop); - LiteralTestUtil::ExpectEqual(*LiteralUtil::CreateR0<int32>(7), + LiteralTestUtil::ExpectEqual(*Literal::CreateR0<int32>(7), *ExecuteAndTransfer(std::move(hlo_module), {})); } @@ -330,13 +329,13 @@ XLA_TEST_F(FusionTest, Reshape__1by1by1) { auto builder = HloComputation::Builder(TestName()); auto hlo_module = CreateNewModule(); auto const0 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(7))); + 
HloInstruction::CreateConstant(Literal::CreateR0<int32>(7))); auto reshape1 = builder.AddInstruction(HloInstruction::CreateReshape( ShapeUtil::MakeShape(S32, {1, 1, 1}), const0)); hlo_module->AddEntryComputation(builder.Build()) ->CreateFusionInstruction(/*instructions_to_fuse=*/{reshape1}, HloInstruction::FusionKind::kLoop); - LiteralTestUtil::ExpectEqual(*LiteralUtil::CreateR3<int32>({{{7}}}), + LiteralTestUtil::ExpectEqual(*Literal::CreateR3<int32>({{{7}}}), *ExecuteAndTransfer(std::move(hlo_module), {})); } @@ -344,13 +343,13 @@ XLA_TEST_F(FusionTest, Reshape__) { auto builder = HloComputation::Builder(TestName()); auto hlo_module = CreateNewModule(); auto const0 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(7))); + HloInstruction::CreateConstant(Literal::CreateR0<int32>(7))); auto reshape1 = builder.AddInstruction( HloInstruction::CreateReshape(ShapeUtil::MakeShape(S32, {}), const0)); hlo_module->AddEntryComputation(builder.Build()) ->CreateFusionInstruction(/*instructions_to_fuse=*/{reshape1}, HloInstruction::FusionKind::kLoop); - LiteralTestUtil::ExpectEqual(*LiteralUtil::CreateR0<int32>(7), + LiteralTestUtil::ExpectEqual(*Literal::CreateR0<int32>(7), *ExecuteAndTransfer(std::move(hlo_module), {})); } @@ -358,14 +357,14 @@ XLA_TEST_F(FusionTest, Reshape_3by3_3by3) { auto builder = HloComputation::Builder(TestName()); auto hlo_module = CreateNewModule(); auto const0 = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}))); + Literal::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}))); auto reshape1 = builder.AddInstruction( HloInstruction::CreateReshape(ShapeUtil::MakeShape(S32, {3, 3}), const0)); hlo_module->AddEntryComputation(builder.Build()) ->CreateFusionInstruction(/*instructions_to_fuse=*/{reshape1}, HloInstruction::FusionKind::kLoop); LiteralTestUtil::ExpectEqual( - *LiteralUtil::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}), + *Literal::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}), *ExecuteAndTransfer(std::move(hlo_module), {})); } @@ -373,14 +372,14 @@ XLA_TEST_F(FusionTest, Transpose_2by3) { auto builder = HloComputation::Builder(TestName()); auto hlo_module = CreateNewModule(); auto const0 = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}}))); + Literal::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}}))); auto reshape1 = builder.AddInstruction(HloInstruction::CreateTranspose( ShapeUtil::MakeShape(S32, {3, 2}), const0, {1, 0})); hlo_module->AddEntryComputation(builder.Build()) ->CreateFusionInstruction(/*instructions_to_fuse=*/{reshape1}, HloInstruction::FusionKind::kLoop); LiteralTestUtil::ExpectEqual( - *LiteralUtil::CreateR2<int32>({{1, 4}, {2, 5}, {3, 6}}), + *Literal::CreateR2<int32>({{1, 4}, {2, 5}, {3, 6}}), *ExecuteAndTransfer(std::move(hlo_module), {})); } @@ -388,14 +387,14 @@ XLA_TEST_F(FusionTest, Transpose_3by3) { auto builder = HloComputation::Builder(TestName()); auto hlo_module = CreateNewModule(); auto const0 = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}))); + Literal::CreateR2<int32>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}))); auto reshape1 = builder.AddInstruction(HloInstruction::CreateTranspose( ShapeUtil::MakeShape(S32, {3, 3}), const0, {1, 0})); hlo_module->AddEntryComputation(builder.Build()) ->CreateFusionInstruction(/*instructions_to_fuse=*/{reshape1}, HloInstruction::FusionKind::kLoop); 
LiteralTestUtil::ExpectEqual( - *LiteralUtil::CreateR2<int32>({{1, 4, 7}, {2, 5, 8}, {3, 6, 9}}), + *Literal::CreateR2<int32>({{1, 4, 7}, {2, 5, 8}, {3, 6, 9}}), *ExecuteAndTransfer(std::move(hlo_module), {})); } @@ -403,14 +402,14 @@ XLA_TEST_F(FusionTest, Reverse) { auto builder = HloComputation::Builder(TestName()); auto hlo_module = CreateNewModule(); auto const0 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR1<int32>({1, 2, 3}))); + HloInstruction::CreateConstant(Literal::CreateR1<int32>({1, 2, 3}))); auto reverse1 = builder.AddInstruction(HloInstruction::CreateReverse( ShapeUtil::MakeShape(S32, {3}), const0, {0})); hlo_module->AddEntryComputation(builder.Build()) ->CreateFusionInstruction(/*instructions_to_fuse=*/{reverse1}, HloInstruction::FusionKind::kLoop); - LiteralTestUtil::ExpectEqual(*LiteralUtil::CreateR1<int32>({3, 2, 1}), + LiteralTestUtil::ExpectEqual(*Literal::CreateR1<int32>({3, 2, 1}), *ExecuteAndTransfer(std::move(hlo_module), {})); } @@ -429,10 +428,10 @@ XLA_TEST_F(FusionTest, DISABLED_ON_CPU(Reduce)) { auto hlo_module = CreateNewModule(); auto builder = HloComputation::Builder(TestName()); - auto const0 = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR1<int32>({1, 2, 4, 8}))); + auto const0 = builder.AddInstruction( + HloInstruction::CreateConstant(Literal::CreateR1<int32>({1, 2, 4, 8}))); auto const1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(0))); + HloInstruction::CreateConstant(Literal::CreateR0<int32>(0))); auto reduce2 = builder.AddInstruction(HloInstruction::CreateReduce( ShapeUtil::MakeShape(S32, {}), const0, const1, {0}, hlo_module->AddEmbeddedComputation(MakeReduceTestComputation()))); @@ -440,7 +439,7 @@ XLA_TEST_F(FusionTest, DISABLED_ON_CPU(Reduce)) { ->CreateFusionInstruction(/*instructions_to_fuse=*/{reduce2}, HloInstruction::FusionKind::kLoop); - LiteralTestUtil::ExpectEqual(*LiteralUtil::CreateR0<int32>(15), + LiteralTestUtil::ExpectEqual(*Literal::CreateR0<int32>(15), *ExecuteAndTransfer(std::move(hlo_module), {})); } @@ -448,10 +447,10 @@ XLA_TEST_F(FusionTest, DISABLED_ON_CPU(ReduceImplicitBroadcast)) { auto hlo_module = CreateNewModule(); auto builder = HloComputation::Builder(TestName()); - auto const0 = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR1<int32>({1, 2, 4, 8}))); + auto const0 = builder.AddInstruction( + HloInstruction::CreateConstant(Literal::CreateR1<int32>({1, 2, 4, 8}))); auto const1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(0))); + HloInstruction::CreateConstant(Literal::CreateR0<int32>(0))); auto reduce2 = builder.AddInstruction(HloInstruction::CreateReduce( ShapeUtil::MakeShape(S32, {}), const0, const1, {0}, hlo_module->AddEmbeddedComputation(MakeReduceTestComputation()))); @@ -461,7 +460,7 @@ XLA_TEST_F(FusionTest, DISABLED_ON_CPU(ReduceImplicitBroadcast)) { ->CreateFusionInstruction(/*instructions_to_fuse=*/{negate3, reduce2}, HloInstruction::FusionKind::kLoop); - LiteralTestUtil::ExpectEqual(*LiteralUtil::CreateR1<int32>({-15}), + LiteralTestUtil::ExpectEqual(*Literal::CreateR1<int32>({-15}), *ExecuteAndTransfer(std::move(hlo_module), {})); } @@ -469,9 +468,9 @@ XLA_TEST_F(FusionTest, DISABLED_ON_CPU(ReduceWindow)) { auto builder = HloComputation::Builder(TestName()); auto hlo_module = CreateNewModule(); auto const0 = builder.AddInstruction(HloInstruction::CreateConstant( - LiteralUtil::CreateR2<int32>({{2, 3, 5}, {7, 11, 13}, {17, 19, 
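Every hunk in fusion_test.cc applies the same mechanical rewrite: literal construction moves from the static LiteralUtil helpers onto Literal itself. A minimal sketch of that before/after pattern follows; it is illustrative only, with placeholder values, and is not an additional change in this diff:

    // Before this change: free helper on LiteralUtil.
    //   auto lit = LiteralUtil::CreateR2<int32>({{1, 2}, {3, 4}});
    // After this change: the same factory is a static member of Literal.
    auto lit = Literal::CreateR2<int32>({{1, 2}, {3, 4}});
    LiteralTestUtil::ExpectEqual(*Literal::CreateR2<int32>({{1, 2}, {3, 4}}), *lit);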
23}}))); + Literal::CreateR2<int32>({{2, 3, 5}, {7, 11, 13}, {17, 19, 23}}))); auto const1 = builder.AddInstruction( - HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(1))); + HloInstruction::CreateConstant(Literal::CreateR0<int32>(1))); Window window; ASSERT_TRUE( tensorflow::protobuf::TextFormat::ParseFromString("dimensions:{\n" @@ -511,7 +510,7 @@ XLA_TEST_F(FusionTest, DISABLED_ON_CPU(ReduceWindow)) { HloInstruction::FusionKind::kLoop); LiteralTestUtil::ExpectEqual( - *LiteralUtil::CreateR2<int32>({{462, 2145}, {24871, 62491}}), + *Literal::CreateR2<int32>({{462, 2145}, {24871, 62491}}), *ExecuteAndTransfer(std::move(hlo_module), {})); } diff --git a/tensorflow/compiler/xla/tests/literal_test_util.cc b/tensorflow/compiler/xla/tests/literal_test_util.cc index f3a49059c9..69c12cc437 100644 --- a/tensorflow/compiler/xla/tests/literal_test_util.cc +++ b/tensorflow/compiler/xla/tests/literal_test_util.cc @@ -133,8 +133,8 @@ bool ExpectLiteralsEqual(const Literal& expected, const Literal& actual, tensorflow::gtl::MutableArraySlice<int64> multi_index, int64 dimension) { if (dimension == expected.shape().dimensions_size()) { - NativeT expected_value = LiteralUtil::Get<NativeT>(expected, multi_index); - NativeT actual_value = LiteralUtil::Get<NativeT>(actual, multi_index); + NativeT expected_value = expected.Get<NativeT>(multi_index); + NativeT actual_value = actual.Get<NativeT>(multi_index); ::testing::AssertionResult result = CompareEqual<NativeT>(expected_value, actual_value); return result; // Defines implicit coersion to bool. @@ -153,10 +153,10 @@ bool ExpectLiteralsEqual(const Literal& expected, const Literal& actual, /* static */ void LiteralTestUtil::ExpectEqual(const Literal& expected, const Literal& actual) { - EXPECT_TRUE(Equal(expected, actual)) << "expected:\n" - << LiteralUtil::ToString(expected) - << "\n\tvs actual:\n" - << LiteralUtil::ToString(actual); + EXPECT_TRUE(Equal(expected, actual)) + << "expected:\n" + << expected.ToString() << "\n\tvs actual:\n" + << actual.ToString(); } /* static */ void LiteralTestUtil::ExpectNotEqual(const Literal& expected, @@ -166,8 +166,8 @@ bool ExpectLiteralsEqual(const Literal& expected, const Literal& actual, /* static */ ::testing::AssertionResult LiteralTestUtil::Equal( const Literal& expected, const Literal& actual) { - VLOG(1) << "expected: " << LiteralUtil::ToString(expected); - VLOG(1) << "actual: " << LiteralUtil::ToString(actual); + VLOG(1) << "expected: " << expected.ToString(); + VLOG(1) << "actual: " << actual.ToString(); AssertEqualShapes(expected.shape(), actual.shape()); std::vector<int64> multi_index(expected.shape().dimensions_size(), 0); @@ -215,8 +215,8 @@ bool ExpectLiteralsEqual(const Literal& expected, const Literal& actual, ::testing::AssertionResult result = ::testing::AssertionSuccess(); if (!match) { result = ::testing::AssertionFailure() - << "expected: " << LiteralUtil::ToString(expected) - << "\nactual: " << LiteralUtil::ToString(actual); + << "expected: " << expected.ToString() + << "\nactual: " << actual.ToString(); VLOG(1) << result.message(); } return result; @@ -224,8 +224,8 @@ bool ExpectLiteralsEqual(const Literal& expected, const Literal& actual, /* static */ void LiteralTestUtil::ExpectEqualTuple(const Literal& expected, const Literal& actual) { - VLOG(1) << "expected: " << LiteralUtil::ToString(expected); - VLOG(1) << "actual: " << LiteralUtil::ToString(actual); + VLOG(1) << "expected: " << expected.ToString(); + VLOG(1) << "actual: " << actual.ToString(); 
ASSERT_TRUE(ShapeUtil::IsTuple(expected.shape())); ASSERT_TRUE(ShapeUtil::IsTuple(actual.shape())); @@ -252,8 +252,8 @@ class NearComparator { // within the error bound. Emits useful log messages and dumps literals to // temporary files on failure. Returns true if literals match. bool ExpectNear(const Literal& expected, const Literal& actual) { - VLOG(1) << "expected: " << LiteralUtil::ToString(expected); - VLOG(1) << "actual: " << LiteralUtil::ToString(actual); + VLOG(1) << "expected: " << expected.ToString(); + VLOG(1) << "actual: " << actual.ToString(); LiteralTestUtil::AssertEqualShapes(expected.shape(), actual.shape()); @@ -287,9 +287,9 @@ class NearComparator { if (num_miscompares_ > 0) { if (!VLOG_IS_ON(1)) { LOG(INFO) << "expected: " << ShapeUtil::HumanString(expected.shape()) - << " " << LiteralUtil::ToString(expected); + << " " << expected.ToString(); LOG(INFO) << "actual: " << ShapeUtil::HumanString(actual.shape()) - << " " << LiteralUtil::ToString(actual); + << " " << actual.ToString(); } EXPECT_TRUE(num_miscompares_ == 0) << "\nmax relative mismatch at index " @@ -374,10 +374,9 @@ class NearComparator { void ExpectLiteralsNear(const Literal& expected, const Literal& actual, int64 dimension) { if (dimension == expected.shape().dimensions_size()) { - bool near = - ExpectValuesNear(LiteralUtil::Get<NativeT>(expected, multi_index_), - LiteralUtil::Get<NativeT>(actual, multi_index_)); - LiteralUtil::Set<bool>(&miscompares_, multi_index_, !near); + bool near = ExpectValuesNear(expected.Get<NativeT>(multi_index_), + actual.Get<NativeT>(multi_index_)); + miscompares_.Set<bool>(multi_index_, !near); } else { for (int64 i = 0; i < expected.shape().dimensions(dimension); ++i) { multi_index_[dimension] = i; @@ -442,8 +441,8 @@ class NearComparator { /* static */ ::testing::AssertionResult LiteralTestUtil::NearTuple( const Literal& expected, const Literal& actual, const ErrorSpec& error) { - VLOG(1) << "expected: " << LiteralUtil::ToString(expected); - VLOG(1) << "actual: " << LiteralUtil::ToString(actual); + VLOG(1) << "expected: " << expected.ToString(); + VLOG(1) << "actual: " << actual.ToString(); if (!ShapeUtil::IsTuple(expected.shape()) || !ShapeUtil::IsTuple(actual.shape())) { @@ -509,8 +508,7 @@ class NearComparator { *shape_with_layout.mutable_layout() = LayoutUtil::MakeLayout(minor_to_major); // Allocate space in the new literal. - LiteralUtil::Reserve(ShapeUtil::ElementsIn(literal.shape()), - new_literal.get()); + new_literal.get()->Reserve(ShapeUtil::ElementsIn(literal.shape())); // Copy data into new literal, element-by-element. 
for (int64 i = 0; i < ShapeUtil::ElementsIn(literal.shape()); ++i) { @@ -520,44 +518,36 @@ class NearComparator { IndexUtil::LinearIndexToMultidimensionalIndex(shape_with_layout, i); switch (literal.shape().element_type()) { case PRED: - LiteralUtil::Set<bool>( - new_literal.get(), to_multi_index, - LiteralUtil::Get<bool>(literal, from_multi_index)); + new_literal.get()->Set<bool>(to_multi_index, + literal.Get<bool>(from_multi_index)); break; case U8: - LiteralUtil::Set<uint8>( - new_literal.get(), to_multi_index, - LiteralUtil::Get<uint8>(literal, from_multi_index)); + new_literal.get()->Set<uint8>(to_multi_index, + literal.Get<uint8>(from_multi_index)); break; case U32: - LiteralUtil::Set<uint32>( - new_literal.get(), to_multi_index, - LiteralUtil::Get<uint32>(literal, from_multi_index)); + new_literal.get()->Set<uint32>(to_multi_index, + literal.Get<uint32>(from_multi_index)); break; case S32: - LiteralUtil::Set<int32>( - new_literal.get(), to_multi_index, - LiteralUtil::Get<int32>(literal, from_multi_index)); + new_literal.get()->Set<int32>(to_multi_index, + literal.Get<int32>(from_multi_index)); break; case U64: - LiteralUtil::Set<uint64>( - new_literal.get(), to_multi_index, - LiteralUtil::Get<uint64>(literal, from_multi_index)); + new_literal.get()->Set<uint64>(to_multi_index, + literal.Get<uint64>(from_multi_index)); break; case S64: - LiteralUtil::Set<int64>( - new_literal.get(), to_multi_index, - LiteralUtil::Get<int64>(literal, from_multi_index)); + new_literal.get()->Set<int64>(to_multi_index, + literal.Get<int64>(from_multi_index)); break; case F32: - LiteralUtil::Set<float>( - new_literal.get(), to_multi_index, - LiteralUtil::Get<float>(literal, from_multi_index)); + new_literal.get()->Set<float>(to_multi_index, + literal.Get<float>(from_multi_index)); break; case F64: - LiteralUtil::Set<double>( - new_literal.get(), to_multi_index, - LiteralUtil::Get<double>(literal, from_multi_index)); + new_literal.get()->Set<double>(to_multi_index, + literal.Get<double>(from_multi_index)); break; default: LOG(FATAL) << "Unhandled primitive element type: " diff --git a/tensorflow/compiler/xla/tests/literal_test_util.h b/tensorflow/compiler/xla/tests/literal_test_util.h index a8b07a2c5d..0def25f34e 100644 --- a/tensorflow/compiler/xla/tests/literal_test_util.h +++ b/tensorflow/compiler/xla/tests/literal_test_util.h @@ -210,20 +210,20 @@ class LiteralTestUtil { template <typename NativeT> /* static */ void LiteralTestUtil::ExpectR0Equal(NativeT expected, const Literal& actual) { - ExpectEqual(*LiteralUtil::CreateR0<NativeT>(expected), actual); + ExpectEqual(*Literal::CreateR0<NativeT>(expected), actual); } template <typename NativeT> /* static */ void LiteralTestUtil::ExpectR1Equal( tensorflow::gtl::ArraySlice<NativeT> expected, const Literal& actual) { - ExpectEqual(*LiteralUtil::CreateR1<NativeT>(expected), actual); + ExpectEqual(*Literal::CreateR1<NativeT>(expected), actual); } template <typename NativeT> /* static */ void LiteralTestUtil::ExpectR2Equal( std::initializer_list<std::initializer_list<NativeT>> expected, const Literal& actual) { - ExpectEqual(*LiteralUtil::CreateR2<NativeT>(expected), actual); + ExpectEqual(*Literal::CreateR2<NativeT>(expected), actual); } template <typename NativeT> @@ -231,46 +231,46 @@ template <typename NativeT> std::initializer_list<std::initializer_list<std::initializer_list<NativeT>>> expected, const Literal& actual) { - ExpectEqual(*LiteralUtil::CreateR3<NativeT>(expected), actual); + ExpectEqual(*Literal::CreateR3<NativeT>(expected), actual); } 
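The switch in the relayout helper above shows the accessor half of the migration: element reads and writes change from the static LiteralUtil::Get/Set helpers to instance methods on Literal. A hedged sketch of the pattern, assuming (as in the surrounding code) that literal is a Literal and new_literal is a std::unique_ptr<Literal>; the float type and indices are placeholders:

    // Before: static helpers that take the literal as an argument.
    //   float v = LiteralUtil::Get<float>(literal, {i, j});
    //   LiteralUtil::Set<float>(new_literal.get(), {i, j}, v);
    // After: the same element access as methods on Literal.
    float v = literal.Get<float>({i, j});
    new_literal->Set<float>({i, j}, v);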
template <typename NativeT> /* static */ void LiteralTestUtil::ExpectR2EqualArray2D( const Array2D<NativeT>& expected, const Literal& actual) { - ExpectEqual(*LiteralUtil::CreateR2FromArray2D(expected), actual); + ExpectEqual(*Literal::CreateR2FromArray2D(expected), actual); } template <typename NativeT> /* static */ void LiteralTestUtil::ExpectR3EqualArray3D( const Array3D<NativeT>& expected, const Literal& actual) { - ExpectEqual(*LiteralUtil::CreateR3FromArray3D(expected), actual); + ExpectEqual(*Literal::CreateR3FromArray3D(expected), actual); } template <typename NativeT> /* static */ void LiteralTestUtil::ExpectR4EqualArray4D( const Array4D<NativeT>& expected, const Literal& actual) { - ExpectEqual(*LiteralUtil::CreateR4FromArray4D(expected), actual); + ExpectEqual(*Literal::CreateR4FromArray4D(expected), actual); } template <typename NativeT> /* static */ void LiteralTestUtil::ExpectR0Near(NativeT expected, const Literal& actual, const ErrorSpec& error) { - ExpectNear(*LiteralUtil::CreateR0<NativeT>(expected), actual, error); + ExpectNear(*Literal::CreateR0<NativeT>(expected), actual, error); } template <typename NativeT> /* static */ void LiteralTestUtil::ExpectR1Near( tensorflow::gtl::ArraySlice<NativeT> expected, const Literal& actual, const ErrorSpec& error) { - ExpectNear(*LiteralUtil::CreateR1<NativeT>(expected), actual, error); + ExpectNear(*Literal::CreateR1<NativeT>(expected), actual, error); } template <typename NativeT> /* static */ void LiteralTestUtil::ExpectR2Near( std::initializer_list<std::initializer_list<NativeT>> expected, const Literal& actual, const ErrorSpec& error) { - ExpectNear(*LiteralUtil::CreateR2<NativeT>(expected), actual, error); + ExpectNear(*Literal::CreateR2<NativeT>(expected), actual, error); } template <typename NativeT> @@ -278,28 +278,28 @@ template <typename NativeT> std::initializer_list<std::initializer_list<std::initializer_list<NativeT>>> expected, const Literal& actual, const ErrorSpec& error) { - ExpectNear(*LiteralUtil::CreateR3<NativeT>(expected), actual, error); + ExpectNear(*Literal::CreateR3<NativeT>(expected), actual, error); } template <typename NativeT> /* static */ void LiteralTestUtil::ExpectR2NearArray2D( const Array2D<NativeT>& expected, const Literal& actual, const ErrorSpec& error) { - ExpectNear(*LiteralUtil::CreateR2FromArray2D(expected), actual, error); + ExpectNear(*Literal::CreateR2FromArray2D(expected), actual, error); } template <typename NativeT> /* static */ void LiteralTestUtil::ExpectR3NearArray3D( const Array3D<NativeT>& expected, const Literal& actual, const ErrorSpec& error) { - ExpectNear(*LiteralUtil::CreateR3FromArray3D(expected), actual, error); + ExpectNear(*Literal::CreateR3FromArray3D(expected), actual, error); } template <typename NativeT> /* static */ void LiteralTestUtil::ExpectR4NearArray4D( const Array4D<NativeT>& expected, const Literal& actual, const ErrorSpec& error) { - ExpectNear(*LiteralUtil::CreateR4FromArray4D(expected), actual, error); + ExpectNear(*Literal::CreateR4FromArray4D(expected), actual, error); } template <PrimitiveType type, typename T> @@ -309,9 +309,9 @@ LiteralTestUtil::CreateRandomLiteral( const std::function<T(tensorflow::gtl::ArraySlice<int64>)>& generator) { using NativeT = typename primitive_util::PrimitiveTypeToNative<type>::type; TF_RET_CHECK(shape.element_type() == type); - std::unique_ptr<Literal> literal = LiteralUtil::CreateFromShape(shape); - TF_RETURN_IF_ERROR(LiteralUtil::Populate<NativeT>( - literal.get(), [&](tensorflow::gtl::ArraySlice<int64> indexes) { + 
std::unique_ptr<Literal> literal = Literal::CreateFromShape(shape); + TF_RETURN_IF_ERROR(literal.get()->Populate<NativeT>( + [&](tensorflow::gtl::ArraySlice<int64> indexes) { return generator(indexes); })); return std::move(literal); diff --git a/tensorflow/compiler/xla/tests/literal_test_util_test.cc b/tensorflow/compiler/xla/tests/literal_test_util_test.cc index a94f45f73b..2acf27ed39 100644 --- a/tensorflow/compiler/xla/tests/literal_test_util_test.cc +++ b/tensorflow/compiler/xla/tests/literal_test_util_test.cc @@ -31,9 +31,8 @@ namespace xla { namespace { TEST(LiteralTestUtilTest, ComparesEqualTuplesEqual) { - std::unique_ptr<Literal> literal = LiteralUtil::MakeTuple({ - LiteralUtil::CreateR0<int32>(42).get(), - LiteralUtil::CreateR0<int32>(64).get(), + std::unique_ptr<Literal> literal = Literal::MakeTuple({ + Literal::CreateR0<int32>(42).get(), Literal::CreateR0<int32>(64).get(), }); LiteralTestUtil::ExpectEqual(*literal, *literal); } @@ -43,13 +42,11 @@ TEST(LiteralTestUtilTest, ComparesUnequalTuplesUnequal) { // un-fail an assertion failure. The CHECK-failure is death, so we can make a // death assertion. auto unequal_things_are_equal = [] { - std::unique_ptr<Literal> lhs = LiteralUtil::MakeTuple({ - LiteralUtil::CreateR0<int32>(42).get(), - LiteralUtil::CreateR0<int32>(64).get(), + std::unique_ptr<Literal> lhs = Literal::MakeTuple({ + Literal::CreateR0<int32>(42).get(), Literal::CreateR0<int32>(64).get(), }); - std::unique_ptr<Literal> rhs = LiteralUtil::MakeTuple({ - LiteralUtil::CreateR0<int32>(64).get(), - LiteralUtil::CreateR0<int32>(42).get(), + std::unique_ptr<Literal> rhs = Literal::MakeTuple({ + Literal::CreateR0<int32>(64).get(), Literal::CreateR0<int32>(42).get(), }); CHECK(LiteralTestUtil::Equal(*lhs, *rhs)) << "LHS and RHS are unequal"; }; @@ -58,8 +55,8 @@ TEST(LiteralTestUtilTest, ComparesUnequalTuplesUnequal) { TEST(LiteralTestUtilTest, ExpectNearFailurePlacesResultsInTemporaryDirectory) { auto dummy_lambda = [] { - auto two = LiteralUtil::CreateR0<float>(2); - auto four = LiteralUtil::CreateR0<float>(4); + auto two = Literal::CreateR0<float>(2); + auto four = Literal::CreateR0<float>(4); ErrorSpec error(0.001); CHECK(LiteralTestUtil::Near(*two, *four, error)) << "two is not near four"; }; @@ -88,11 +85,11 @@ TEST(LiteralTestUtilTest, ExpectNearFailurePlacesResultsInTemporaryDirectory) { &literal_proto)); Literal literal(literal_proto); if (result.find("expected") != string::npos) { - EXPECT_EQ("2", LiteralUtil::ToString(literal)); + EXPECT_EQ("2", literal.ToString()); } else if (result.find("actual") != string::npos) { - EXPECT_EQ("4", LiteralUtil::ToString(literal)); + EXPECT_EQ("4", literal.ToString()); } else if (result.find("miscompares") != string::npos) { - EXPECT_EQ("true", LiteralUtil::ToString(literal)); + EXPECT_EQ("true", literal.ToString()); } else { FAIL() << "unknown file in temporary directory: " << result; } diff --git a/tensorflow/compiler/xla/tests/map_test.cc b/tensorflow/compiler/xla/tests/map_test.cc index d613d79993..ffa87348a0 100644 --- a/tensorflow/compiler/xla/tests/map_test.cc +++ b/tensorflow/compiler/xla/tests/map_test.cc @@ -169,7 +169,7 @@ class MapTest : public ClientLibraryTestBase { TEST_F(MapTest, MapEachElemPlusOneR0) { // Applies lambda (x) (+ x 1)) to an input scalar. 
ComputationBuilder builder(client_, TestName()); - std::unique_ptr<Literal> param0_literal = LiteralUtil::CreateR0<float>(42.0); + std::unique_ptr<Literal> param0_literal = Literal::CreateR0<float>(42.0); std::unique_ptr<GlobalData> param0_data = client_->TransferToServer(*param0_literal).ConsumeValueOrDie(); @@ -183,7 +183,7 @@ TEST_F(MapTest, MapEachElemPlusOneR0) { XLA_TEST_F(MapTest, MapEachElemPlusOneR1S0) { // Maps (lambda (x) (+ x 1)) onto an input R1F32 vector of length 0. ComputationBuilder builder(client_, TestName()); - std::unique_ptr<Literal> param0_literal = LiteralUtil::CreateR1<float>({}); + std::unique_ptr<Literal> param0_literal = Literal::CreateR1<float>({}); std::unique_ptr<GlobalData> param0_data = client_->TransferToServer(*param0_literal).ConsumeValueOrDie(); @@ -198,7 +198,7 @@ TEST_F(MapTest, MapEachElemPlusOneR1S4) { // Maps (lambda (x) (+ x 1)) onto an input R1F32 vector of length 4. ComputationBuilder builder(client_, TestName()); std::unique_ptr<Literal> param0_literal = - LiteralUtil::CreateR1<float>({2.2f, 3.3f, 4.4f, 5.5f}); + Literal::CreateR1<float>({2.2f, 3.3f, 4.4f, 5.5f}); std::unique_ptr<GlobalData> param0_data = client_->TransferToServer(*param0_literal).ConsumeValueOrDie(); @@ -212,7 +212,7 @@ TEST_F(MapTest, MapEachElemPlusOneR1S4) { TEST_F(MapTest, MapEachF32ElementToS32Constant) { ComputationBuilder builder(client_, TestName()); std::unique_ptr<Literal> param0_literal = - LiteralUtil::CreateR1<float>({2.2f, 3.3f, 4.4f, 5.5f}); + Literal::CreateR1<float>({2.2f, 3.3f, 4.4f, 5.5f}); std::unique_ptr<GlobalData> param0_data = client_->TransferToServer(*param0_literal).ConsumeValueOrDie(); @@ -225,7 +225,7 @@ TEST_F(MapTest, MapEachF32ElementToS32Constant) { TEST_F(MapTest, MapEachF32ElementToU32Constant) { ComputationBuilder builder(client_, TestName()); std::unique_ptr<Literal> param0_literal = - LiteralUtil::CreateR1<float>({2.2f, 3.3f, 4.4f, 5.5f}); + Literal::CreateR1<float>({2.2f, 3.3f, 4.4f, 5.5f}); std::unique_ptr<GlobalData> param0_data = client_->TransferToServer(*param0_literal).ConsumeValueOrDie(); @@ -239,7 +239,7 @@ TEST_F(MapTest, MapEachElemLongerChainR1) { // Maps (lambda (x) (* (+ x 1) x)) onto an input R1F32 vector. ComputationBuilder builder(client_, TestName()); std::unique_ptr<Literal> param0_literal = - LiteralUtil::CreateR1<float>({2.6f, -5.1f, 0.1f, 0.2f, 999.0f, 255.5f}); + Literal::CreateR1<float>({2.6f, -5.1f, 0.1f, 0.2f, 999.0f, 255.5f}); std::unique_ptr<GlobalData> param0_data = client_->TransferToServer(*param0_literal).ConsumeValueOrDie(); @@ -255,7 +255,7 @@ XLA_TEST_F(MapTest, MapMultipleMapsR1S0) { // Maps (lambda (x) (+ x 1)) onto an input R1F32 vector of length 0, and then // maps (lambda (x) (* x 2)) on the result. ComputationBuilder builder(client_, TestName()); - std::unique_ptr<Literal> param0_literal = LiteralUtil::CreateR1<float>({}); + std::unique_ptr<Literal> param0_literal = Literal::CreateR1<float>({}); std::unique_ptr<GlobalData> param0_data = client_->TransferToServer(*param0_literal).ConsumeValueOrDie(); @@ -272,7 +272,7 @@ TEST_F(MapTest, MapMultipleMapsR1S4) { // maps (lambda (x) (* x 2)) on the result. 
ComputationBuilder builder(client_, TestName()); std::unique_ptr<Literal> param0_literal = - LiteralUtil::CreateR1<float>({2.2f, 3.3f, 4.4f, 5.5f}); + Literal::CreateR1<float>({2.2f, 3.3f, 4.4f, 5.5f}); std::unique_ptr<GlobalData> param0_data = client_->TransferToServer(*param0_literal).ConsumeValueOrDie(); @@ -287,7 +287,7 @@ TEST_F(MapTest, MapMultipleMapsR1S4) { TEST_F(MapTest, MapEachElemPlusOneR2) { // Maps (lambda (x) (+ x 1)) onto an input R2F32 vector. ComputationBuilder builder(client_, TestName()); - std::unique_ptr<Literal> param0_literal = LiteralUtil::CreateR2<float>( + std::unique_ptr<Literal> param0_literal = Literal::CreateR2<float>( {{13.25f, 14.0f}, {-7.1f, -7.2f}, {-8.8f, 8.8f}}); std::unique_ptr<GlobalData> param0_data = client_->TransferToServer(*param0_literal).ConsumeValueOrDie(); @@ -384,11 +384,11 @@ TEST_F(MapTest, MapBinaryAdder) { // Maps (lambda (x y) (+ x y)) onto two R1F32 vectors. ComputationBuilder builder(client_, TestName()); std::unique_ptr<Literal> param0_literal = - LiteralUtil::CreateR1<float>({2.2f, 3.3f, 4.4f, 5.5f}); + Literal::CreateR1<float>({2.2f, 3.3f, 4.4f, 5.5f}); std::unique_ptr<GlobalData> param0_data = client_->TransferToServer(*param0_literal).ConsumeValueOrDie(); std::unique_ptr<Literal> param1_literal = - LiteralUtil::CreateR1<float>({5.1f, 4.4f, -0.1f, -5.5f}); + Literal::CreateR1<float>({5.1f, 4.4f, -0.1f, -5.5f}); std::unique_ptr<GlobalData> param1_data = client_->TransferToServer(*param1_literal).ConsumeValueOrDie(); @@ -433,12 +433,12 @@ XLA_TEST_F(MapTest, AddWithMixedLayouts) { XLA_TEST_F(MapTest, AddR3_3x0x2) { ComputationBuilder builder(client_, TestName()); std::unique_ptr<Literal> param0_literal = - LiteralUtil::CreateR3FromArray3D<int32>(Array3D<int32>(3, 0, 2)); + Literal::CreateR3FromArray3D<int32>(Array3D<int32>(3, 0, 2)); std::unique_ptr<GlobalData> param0_data = client_->TransferToServer(*param0_literal).ConsumeValueOrDie(); std::unique_ptr<Literal> param1_literal = - LiteralUtil::CreateR3FromArray3D<int32>(Array3D<int32>(3, 0, 2)); + Literal::CreateR3FromArray3D<int32>(Array3D<int32>(3, 0, 2)); std::unique_ptr<GlobalData> param1_data = client_->TransferToServer(*param1_literal).ConsumeValueOrDie(); @@ -455,15 +455,15 @@ TEST_F(MapTest, MapTernaryAdder) { // Maps (lambda (x y z) (+ x y z)) onto three R1F32 vectors. 
ComputationBuilder builder(client_, TestName()); std::unique_ptr<Literal> param0_literal = - LiteralUtil::CreateR1<float>({2.2f, 3.3f, 4.4f, 5.5f}); + Literal::CreateR1<float>({2.2f, 3.3f, 4.4f, 5.5f}); std::unique_ptr<GlobalData> param0_data = client_->TransferToServer(*param0_literal).ConsumeValueOrDie(); std::unique_ptr<Literal> param1_literal = - LiteralUtil::CreateR1<float>({5.1f, 4.4f, -0.1f, -5.5f}); + Literal::CreateR1<float>({5.1f, 4.4f, -0.1f, -5.5f}); std::unique_ptr<GlobalData> param1_data = client_->TransferToServer(*param1_literal).ConsumeValueOrDie(); std::unique_ptr<Literal> param2_literal = - LiteralUtil::CreateR1<float>({-10.0f, -100.0f, -900.0f, -400.0f}); + Literal::CreateR1<float>({-10.0f, -100.0f, -900.0f, -400.0f}); std::unique_ptr<GlobalData> param2_data = client_->TransferToServer(*param2_literal).ConsumeValueOrDie(); @@ -516,11 +516,11 @@ TEST_F(MapTest, MapOperantionWithBuildError) { auto error_add = sub_builder->BuildAndNoteError(); std::unique_ptr<Literal> param0_literal = - LiteralUtil::CreateR1<float>({2.2f, 3.3f, 4.4f, 5.5f}); + Literal::CreateR1<float>({2.2f, 3.3f, 4.4f, 5.5f}); std::unique_ptr<GlobalData> param0_data = client_->TransferToServer(*param0_literal).ConsumeValueOrDie(); std::unique_ptr<Literal> param1_literal = - LiteralUtil::CreateR1<float>({5.1f, 4.4f, -0.1f, -5.5f}); + Literal::CreateR1<float>({5.1f, 4.4f, -0.1f, -5.5f}); std::unique_ptr<GlobalData> param1_data = client_->TransferToServer(*param1_literal).ConsumeValueOrDie(); @@ -553,8 +553,8 @@ TEST_F(MapTestWithFullOpt, MapScalarPower) { sub_builder->Pow(x, y); auto power = sub_builder->BuildAndNoteError(); - std::unique_ptr<Literal> param0_literal = LiteralUtil::CreateR0<float>(2.0f); - std::unique_ptr<Literal> param1_literal = LiteralUtil::CreateR0<float>(5.0f); + std::unique_ptr<Literal> param0_literal = Literal::CreateR0<float>(2.0f); + std::unique_ptr<Literal> param1_literal = Literal::CreateR0<float>(5.0f); std::unique_ptr<GlobalData> param0_data = client_->TransferToServer(*param0_literal).ConsumeValueOrDie(); std::unique_ptr<GlobalData> param1_data = @@ -580,8 +580,8 @@ TEST_F(MapTestWithFullOpt, MapSubtractOppositeOrder) { sub_builder->Sub(y, x); // note that this is y - x, not x - y auto sub_opposite = sub_builder->BuildAndNoteError(); - std::unique_ptr<Literal> param0_literal = LiteralUtil::CreateR0<float>(2.0f); - std::unique_ptr<Literal> param1_literal = LiteralUtil::CreateR0<float>(5.0f); + std::unique_ptr<Literal> param0_literal = Literal::CreateR0<float>(2.0f); + std::unique_ptr<Literal> param1_literal = Literal::CreateR0<float>(5.0f); std::unique_ptr<GlobalData> param0_data = client_->TransferToServer(*param0_literal).ConsumeValueOrDie(); std::unique_ptr<GlobalData> param1_data = @@ -605,7 +605,7 @@ TEST_F(MapTestWithFullOpt, MapSquare) { sub_builder->Mul(x, x); auto square = sub_builder->BuildAndNoteError(); - std::unique_ptr<Literal> param0_literal = LiteralUtil::CreateR0<float>(10.0f); + std::unique_ptr<Literal> param0_literal = Literal::CreateR0<float>(10.0f); std::unique_ptr<GlobalData> param0_data = client_->TransferToServer(*param0_literal).ConsumeValueOrDie(); diff --git a/tensorflow/compiler/xla/tests/matrix_ops_simple_test.cc b/tensorflow/compiler/xla/tests/matrix_ops_simple_test.cc index 883ec2277b..717e9cd494 100644 --- a/tensorflow/compiler/xla/tests/matrix_ops_simple_test.cc +++ b/tensorflow/compiler/xla/tests/matrix_ops_simple_test.cc @@ -87,8 +87,8 @@ TEST_F(MatOpsSimpleTest, ExpTwoByTwoValues) { builder.Exp(data); std::unique_ptr<Literal> expected = - 
LiteralUtil::CreateR2<float>({{2.71828, 1.00000}, // row 0 - {0.36788, 1.64872}}); // row 1 + Literal::CreateR2<float>({{2.71828, 1.00000}, // row 0 + {0.36788, 1.64872}}); // row 1 ComputeAndCompareLiteral(&builder, *expected, {}, ErrorSpec(1e-5)); } @@ -115,8 +115,8 @@ TEST_F(MatOpsSimpleTest, MapTwoByTwo) { auto map = builder.Map({data}, add_half); std::unique_ptr<Literal> expected = - LiteralUtil::CreateR2<float>({{1.5, 0.5}, // row 0 - {-0.5, 1.0}}); // row 1 + Literal::CreateR2<float>({{1.5, 0.5}, // row 0 + {-0.5, 1.0}}); // row 1 ComputeAndCompareLiteral(&builder, *expected, {}, ErrorSpec(1e-5)); } @@ -133,8 +133,8 @@ TEST_F(MatOpsSimpleTest, MaxTwoByTwoValues) { auto max = builder.Max(lhs, rhs); std::unique_ptr<Literal> expected = - LiteralUtil::CreateR2<float>({{7.0, 6.0}, // row 0 - {3.0, -4.0}}); // row 1 + Literal::CreateR2<float>({{7.0, 6.0}, // row 0 + {3.0, -4.0}}); // row 1 ComputeAndCompareLiteral(&builder, *expected, {}, ErrorSpec(1e-6)); } @@ -180,14 +180,12 @@ TEST_P(MatOpsDotAddTest, Dot_Add_2x2_2x2) { TF_ASSIGN_OR_ASSERT_OK( auto lhs_handle, - client_->TransferToServer( - *LiteralUtil::CreateR2FromArray2DWithLayout<float>( - lhs, LayoutUtil::MakeLayout(minor_to_major(row_major))))); + client_->TransferToServer(*Literal::CreateR2FromArray2DWithLayout<float>( + lhs, LayoutUtil::MakeLayout(minor_to_major(row_major))))); TF_ASSIGN_OR_ASSERT_OK( auto rhs_handle, - client_->TransferToServer( - *LiteralUtil::CreateR2FromArray2DWithLayout<float>( - rhs, LayoutUtil::MakeLayout(minor_to_major(row_major))))); + client_->TransferToServer(*Literal::CreateR2FromArray2DWithLayout<float>( + rhs, LayoutUtil::MakeLayout(minor_to_major(row_major))))); ComputationBuilder builder(client_, TestName()); auto lhs_arg = builder.Parameter(0, lhs_shape, "lhs"); diff --git a/tensorflow/compiler/xla/tests/pad_test.cc b/tensorflow/compiler/xla/tests/pad_test.cc index 73f4d539d9..e270a0477f 100644 --- a/tensorflow/compiler/xla/tests/pad_test.cc +++ b/tensorflow/compiler/xla/tests/pad_test.cc @@ -182,8 +182,8 @@ TEST_F(PadTest, Pad4DFloatArrayMinorFirstSmall) { const float pad_value = -5.123f; Array4D<float> input_array(1, 1, 2, 3, {1, 2, 3, 4, 5, 6}); - auto input = LiteralUtil::CreateR4FromArray4D<float>(input_array); - input = LiteralUtil::Relayout(*input, layout); + auto input = Literal::CreateR4FromArray4D<float>(input_array); + input = input->Relayout(layout); b.Pad(b.ConstantLiteral(*input), b.ConstantR0(pad_value), padding_config); @@ -227,8 +227,8 @@ XLA_TEST_F(PadTest, Pad4DFloatArrayMinorFirstNonTrivialMinorDimensions) { input_array(0, 0, 0, 0) = 1.0f; input_array(0, 24, 6, 6) = 2.0f; input_array(0, 17, 2, 5) = 3.0f; - auto input = LiteralUtil::CreateR4FromArray4D<float>(input_array); - input = LiteralUtil::Relayout(*input, layout); + auto input = Literal::CreateR4FromArray4D<float>(input_array); + input = input->Relayout(layout); b.Pad(b.ConstantLiteral(*input), b.ConstantR0(pad_value), padding_config); @@ -307,7 +307,7 @@ XLA_TEST_F(PadTest, Large2DPad) { auto ones = MakeUnique<Array2D<float>>(4, 4); ones->Fill(1.0f); - auto input_literal = LiteralUtil::CreateR2FromArray2D<float>(*ones); + auto input_literal = Literal::CreateR2FromArray2D<float>(*ones); std::unique_ptr<GlobalData> input_data = client_->TransferToServer(*input_literal).ConsumeValueOrDie(); @@ -333,7 +333,7 @@ XLA_TEST_F(PadTest, AllTypes2DPad) { auto operand = MakeUnique<Array2D<float>>(in_rows, in_cols); operand->FillUnique(0.0f); - auto input_literal = LiteralUtil::CreateR2FromArray2D<float>(*operand); + auto 
input_literal = Literal::CreateR2FromArray2D<float>(*operand); std::unique_ptr<GlobalData> input_data = client_->TransferToServer(*input_literal).ConsumeValueOrDie(); @@ -364,7 +364,7 @@ XLA_TEST_F(PadTest, High2DPad) { auto operand = MakeUnique<Array2D<float>>(in_rows, in_cols); operand->FillUnique(1.0f); - auto input_literal = LiteralUtil::CreateR2FromArray2D<float>(*operand); + auto input_literal = Literal::CreateR2FromArray2D<float>(*operand); auto expected = ReferenceUtil::PadArray2D(*operand, padding_config, 2.718f); std::unique_ptr<GlobalData> input_data = client_->TransferToServer(*input_literal).ConsumeValueOrDie(); @@ -396,7 +396,7 @@ XLA_TEST_F(PadTest, NegativePadding2D) { auto operand = MakeUnique<Array2D<float>>(in_rows, in_cols); operand->FillUnique(1.0f); - auto input_literal = LiteralUtil::CreateR2FromArray2D<float>(*operand); + auto input_literal = Literal::CreateR2FromArray2D<float>(*operand); auto expected = ReferenceUtil::PadArray2D(*operand, padding_config, 2.718f); std::unique_ptr<GlobalData> input_data = client_->TransferToServer(*input_literal).ConsumeValueOrDie(); @@ -428,7 +428,7 @@ XLA_TEST_F(PadTest, NegativeAndInteriorPadding2D) { auto operand = MakeUnique<Array2D<float>>(in_rows, in_cols); operand->FillUnique(1.0f); - auto input_literal = LiteralUtil::CreateR2FromArray2D<float>(*operand); + auto input_literal = Literal::CreateR2FromArray2D<float>(*operand); auto expected = ReferenceUtil::PadArray2D(*operand, padding_config, 2.718f); std::unique_ptr<GlobalData> input_data = client_->TransferToServer(*input_literal).ConsumeValueOrDie(); @@ -452,7 +452,7 @@ XLA_TEST_F(PadTest, ReducePad) { auto ones = MakeUnique<Array4D<float>>(2, 2, 2, 2); ones->Fill(1.0); - auto input_literal = LiteralUtil::CreateR4FromArray4D<float>(*ones); + auto input_literal = Literal::CreateR4FromArray4D<float>(*ones); std::unique_ptr<GlobalData> input_data = client_->TransferToServer(*input_literal).ConsumeValueOrDie(); diff --git a/tensorflow/compiler/xla/tests/params_test.cc b/tensorflow/compiler/xla/tests/params_test.cc index 03b8392a82..2065e9e813 100644 --- a/tensorflow/compiler/xla/tests/params_test.cc +++ b/tensorflow/compiler/xla/tests/params_test.cc @@ -43,8 +43,7 @@ class ParamsTest : public ClientLibraryTestBase {}; XLA_TEST_F(ParamsTest, ConstantR0F32Param) { ComputationBuilder builder(client_, TestName()); - std::unique_ptr<Literal> param0_literal = - LiteralUtil::CreateR0<float>(3.14159f); + std::unique_ptr<Literal> param0_literal = Literal::CreateR0<float>(3.14159f); std::unique_ptr<GlobalData> param0_data = client_->TransferToServer(*param0_literal).ConsumeValueOrDie(); @@ -56,7 +55,7 @@ XLA_TEST_F(ParamsTest, ConstantR0F32Param) { XLA_TEST_F(ParamsTest, ConstantR1S0F32Param) { ComputationBuilder builder(client_, TestName()); - std::unique_ptr<Literal> param0_literal = LiteralUtil::CreateR1<float>({}); + std::unique_ptr<Literal> param0_literal = Literal::CreateR1<float>({}); std::unique_ptr<GlobalData> param0_data = client_->TransferToServer(*param0_literal).ConsumeValueOrDie(); @@ -69,7 +68,7 @@ XLA_TEST_F(ParamsTest, ConstantR1S0F32Param) { XLA_TEST_F(ParamsTest, ConstantR1S2F32Param) { ComputationBuilder builder(client_, TestName()); std::unique_ptr<Literal> param0_literal = - LiteralUtil::CreateR1<float>({3.14f, -100.25f}); + Literal::CreateR1<float>({3.14f, -100.25f}); std::unique_ptr<GlobalData> param0_data = client_->TransferToServer(*param0_literal).ConsumeValueOrDie(); @@ -82,7 +81,7 @@ XLA_TEST_F(ParamsTest, ConstantR1S2F32Param) { XLA_TEST_F(ParamsTest, 
ConstantR1U8Param) { ComputationBuilder builder(client_, TestName()); string str("hello world"); - std::unique_ptr<Literal> param0_literal = LiteralUtil::CreateR1U8(str); + std::unique_ptr<Literal> param0_literal = Literal::CreateR1U8(str); std::unique_ptr<GlobalData> param0_data = client_->TransferToServer(*param0_literal).ConsumeValueOrDie(); @@ -95,7 +94,7 @@ XLA_TEST_F(ParamsTest, ConstantR1U8Param) { XLA_TEST_F(ParamsTest, ConstantR2_3x0_F32Param) { ComputationBuilder builder(client_, TestName()); std::unique_ptr<Literal> param0_literal = - LiteralUtil::CreateR2FromArray2D<float>(Array2D<float>(3, 0)); + Literal::CreateR2FromArray2D<float>(Array2D<float>(3, 0)); std::unique_ptr<GlobalData> param0_data = client_->TransferToServer(*param0_literal).ConsumeValueOrDie(); @@ -107,7 +106,7 @@ XLA_TEST_F(ParamsTest, ConstantR2_3x0_F32Param) { XLA_TEST_F(ParamsTest, ConstantR2F32Param) { ComputationBuilder builder(client_, TestName()); - std::unique_ptr<Literal> param0_literal = LiteralUtil::CreateR2<float>( + std::unique_ptr<Literal> param0_literal = Literal::CreateR2<float>( {{3.14f, -100.25f}, {7e8f, 7e-9f}, {30.3f, -100.0f}}); std::unique_ptr<GlobalData> param0_data = client_->TransferToServer(*param0_literal).ConsumeValueOrDie(); @@ -123,12 +122,12 @@ XLA_TEST_F(ParamsTest, ConstantR2F32Param) { XLA_TEST_F(ParamsTest, TwoParameters) { ComputationBuilder builder(client_, TestName()); - std::unique_ptr<Literal> literal0 = LiteralUtil::CreateR1<float>({1, 2}); + std::unique_ptr<Literal> literal0 = Literal::CreateR1<float>({1, 2}); std::unique_ptr<GlobalData> param0_data = client_->TransferToServer(*literal0).ConsumeValueOrDie(); auto param0 = builder.Parameter(0, literal0->shape(), "param0"); - std::unique_ptr<Literal> literal1 = LiteralUtil::CreateR1<float>({10, 20}); + std::unique_ptr<Literal> literal1 = Literal::CreateR1<float>({10, 20}); std::unique_ptr<GlobalData> param1_data = client_->TransferToServer(*literal1).ConsumeValueOrDie(); auto param1 = builder.Parameter(1, literal1->shape(), "param1"); @@ -154,7 +153,7 @@ XLA_TEST_F(ParamsTest, TwoParameters) { XLA_TEST_F(ParamsTest, MissingParameter) { // Test that an error is returned when a computation with an incomplete set of // parameters (parameter numbers not contiguous from 0) is executed. - std::unique_ptr<Literal> literal = LiteralUtil::CreateR0<float>(3.14159f); + std::unique_ptr<Literal> literal = Literal::CreateR0<float>(3.14159f); std::unique_ptr<GlobalData> data = client_->TransferToServer(*literal).ConsumeValueOrDie(); @@ -172,12 +171,12 @@ XLA_TEST_F(ParamsTest, MissingParameter) { XLA_TEST_F(ParamsTest, UnusedParameter) { ComputationBuilder builder(client_, TestName()); - std::unique_ptr<Literal> literal0 = LiteralUtil::CreateR1<float>({1, 2}); + std::unique_ptr<Literal> literal0 = Literal::CreateR1<float>({1, 2}); std::unique_ptr<GlobalData> param0_data = client_->TransferToServer(*literal0).ConsumeValueOrDie(); auto param0 = builder.Parameter(0, literal0->shape(), "param0"); - std::unique_ptr<Literal> literal1 = LiteralUtil::CreateR1<float>({10, 20}); + std::unique_ptr<Literal> literal1 = Literal::CreateR1<float>({10, 20}); std::unique_ptr<GlobalData> param1_data = client_->TransferToServer(*literal1).ConsumeValueOrDie(); auto param1 = builder.Parameter(1, literal1->shape(), "param1"); @@ -192,12 +191,11 @@ XLA_TEST_F(ParamsTest, UnusedParametersInUnusedExpression) { // unused expression. 
ComputationBuilder builder(client_, TestName()); - std::unique_ptr<Literal> literal0 = LiteralUtil::CreateR1<float>({1, 2}); + std::unique_ptr<Literal> literal0 = Literal::CreateR1<float>({1, 2}); std::unique_ptr<GlobalData> param0_data = client_->TransferToServer(*literal0).ConsumeValueOrDie(); - std::unique_ptr<Literal> literal1 = - LiteralUtil::CreateR1<float>({10, 20, 30}); + std::unique_ptr<Literal> literal1 = Literal::CreateR1<float>({10, 20, 30}); std::unique_ptr<GlobalData> param1_data = client_->TransferToServer(*literal1).ConsumeValueOrDie(); @@ -237,7 +235,7 @@ XLA_TEST_F(ParamsTest, HundredLargeR1Parameters) { std::vector<float> sum_value = {{entry0, entry1}}; sum_value.resize(size); - std::unique_ptr<Literal> literal = LiteralUtil::CreateR1<float>(sum_value); + std::unique_ptr<Literal> literal = Literal::CreateR1<float>(sum_value); param_data_owner.push_back( client_->TransferToServer(*literal).ConsumeValueOrDie()); ComputationDataHandle param = @@ -267,9 +265,9 @@ XLA_TEST_F(ParamsTest, std::unique_ptr<GlobalData> data = client_ - ->TransferToServer(*LiteralUtil::MakeTuple({ - LiteralUtil::CreateR1<float>({1, 2, 3}).get(), - LiteralUtil::CreateR1<float>({4, 5, 6}).get(), + ->TransferToServer(*Literal::MakeTuple({ + Literal::CreateR1<float>({1, 2, 3}).get(), + Literal::CreateR1<float>({4, 5, 6}).get(), })) .ConsumeValueOrDie(); @@ -281,7 +279,7 @@ XLA_TEST_F(ParamsTest, // Verifies that passing a 2x2 with {0, 1} layout returns the same value back // when (transferred to the server and) passed through a parameter. XLA_TEST_F(ParamsTest, R2_2x2_Layout_01) { - std::unique_ptr<Literal> literal = LiteralUtil::CreateR2<float>({ + std::unique_ptr<Literal> literal = Literal::CreateR2<float>({ {1, 2}, {3, 4}, }); *literal->mutable_shape()->mutable_layout() = LayoutUtil::MakeLayout({0, 1}); @@ -295,7 +293,7 @@ XLA_TEST_F(ParamsTest, R2_2x2_Layout_01) { // As above, but for {1, 0} layout. XLA_TEST_F(ParamsTest, R2_2x2_Layout_10) { - std::unique_ptr<Literal> literal = LiteralUtil::CreateR2<float>({ + std::unique_ptr<Literal> literal = Literal::CreateR2<float>({ {1, 3}, {2, 4}, }); *literal->mutable_shape()->mutable_layout() = LayoutUtil::MakeLayout({1, 0}); @@ -308,7 +306,7 @@ XLA_TEST_F(ParamsTest, R2_2x2_Layout_10) { } XLA_TEST_F(ParamsTest, R2_2x2_TryToPassReverseLayoutToParameter) { - std::unique_ptr<Literal> literal = LiteralUtil::CreateR2<float>({ + std::unique_ptr<Literal> literal = Literal::CreateR2<float>({ {1, 3}, {2, 4}, }); const Shape original = literal->shape(); @@ -321,7 +319,7 @@ XLA_TEST_F(ParamsTest, R2_2x2_TryToPassReverseLayoutToParameter) { std::reverse(original_layout.begin(), original_layout.end()); *literal->mutable_shape()->mutable_layout() = LayoutUtil::MakeLayout(original_layout); - ASSERT_EQ(2, LiteralUtil::Get<float>(*literal, {0, 1})); + ASSERT_EQ(2, literal->Get<float>({0, 1})); } // Use the original shape in building the computation. 
ComputationBuilder builder(client_, TestName()); diff --git a/tensorflow/compiler/xla/tests/prng_test.cc b/tensorflow/compiler/xla/tests/prng_test.cc index 74346342e0..57e390bff9 100644 --- a/tensorflow/compiler/xla/tests/prng_test.cc +++ b/tensorflow/compiler/xla/tests/prng_test.cc @@ -57,11 +57,10 @@ void PrngTest::UniformTest(T a, T b, tensorflow::gtl::ArraySlice<int64> dims) { SetSeed(42); auto actual = ExecuteAndTransferOrDie(&builder, /*arguments=*/{}); EXPECT_THAT(dims, ::testing::ElementsAreArray(actual->shape().dimensions())); - LiteralUtil::EachCell<T>(*actual, - [=](tensorflow::gtl::ArraySlice<int64>, T value) { - EXPECT_LE(a, value); - EXPECT_LT(value, b); - }); + actual->EachCell<T>([=](tensorflow::gtl::ArraySlice<int64>, T value) { + EXPECT_LE(a, value); + EXPECT_LT(value, b); + }); } void PrngTest::BernoulliTest(float p, tensorflow::gtl::ArraySlice<int64> dims) { @@ -78,8 +77,8 @@ void PrngTest::BernoulliTest(float p, tensorflow::gtl::ArraySlice<int64> dims) { &execution_options)); EXPECT_THAT(dims, ::testing::ElementsAreArray(actual->shape().dimensions())); int32 sum = 0; - LiteralUtil::EachCell<uint32>( - *actual, [&sum](tensorflow::gtl::ArraySlice<int64>, uint32 value) { + actual->EachCell<uint32>( + [&sum](tensorflow::gtl::ArraySlice<int64>, uint32 value) { EXPECT_TRUE(value == 0 || value == 1); sum += value; }); @@ -123,10 +122,8 @@ double PrngTest::UniformChiSquared(int32 range_size, int32 expected_count) { SetSeed(42); auto actual = ExecuteAndTransferOrDie(&builder, /*arguments=*/{}); std::vector<int32> counts(range_size, 0); - LiteralUtil::EachCell<int32>( - *actual, [&counts](tensorflow::gtl::ArraySlice<int64>, int32 value) { - ++counts[value]; - }); + actual->EachCell<int32>([&counts](tensorflow::gtl::ArraySlice<int64>, + int32 value) { ++counts[value]; }); int64 sum = 0; for (int32 i = 0; i < range_size; ++i) { sum += Square(static_cast<int64>(counts[i] - expected_count)); @@ -169,7 +166,7 @@ XLA_TEST_F(PrngTest, MapUsingRng) { ComputationBuilder builder(client_, TestName()); std::unique_ptr<Literal> param0_literal = - LiteralUtil::CreateR1<float>({2.2f, 5.3f, 4.4f, 5.5f}); + Literal::CreateR1<float>({2.2f, 5.3f, 4.4f, 5.5f}); TF_ASSIGN_OR_ASSERT_OK(std::unique_ptr<GlobalData> param0_data, client_->TransferToServer(*param0_literal)); diff --git a/tensorflow/compiler/xla/tests/reduce_test.cc b/tensorflow/compiler/xla/tests/reduce_test.cc index 12072b6c95..ac65a47afa 100644 --- a/tensorflow/compiler/xla/tests/reduce_test.cc +++ b/tensorflow/compiler/xla/tests/reduce_test.cc @@ -63,12 +63,12 @@ class ReduceTest : public ClientLibraryTestBase { ReduceTest() { // Implementation note: laid out z >> y >> x by default. 
// clang-format off - literal_2d_ = LiteralUtil::CreateR2<float>({ + literal_2d_ = Literal::CreateR2<float>({ // x0 x1 x2 { 1.f, 2.f, 3.f}, // y0 { 4.f, 5.f, 6.f}, // y1 }); - literal_3d_ = LiteralUtil::CreateR3Projected<float>({ + literal_3d_ = Literal::CreateR3Projected<float>({ // x0 x1 x2 { 1.f, 2.f, 3.f}, // y0 { 4.f, 5.f, 6.f}, // y1 @@ -97,7 +97,7 @@ class ReduceTest : public ClientLibraryTestBase { } } std::unique_ptr<Literal> input_literal = - LiteralUtil::CreateR1(AsSlice(input_data)); + Literal::CreateR1(AsSlice(input_data)); std::unique_ptr<GlobalData> input_global_data = client_->TransferToServer(*input_literal).ConsumeValueOrDie(); @@ -129,7 +129,7 @@ class ReduceTest : public ClientLibraryTestBase { builder.Reduce(pred_values, init_value, reduce, /*dimensions_to_reduce=*/{0}); - std::unique_ptr<Literal> input_literal = LiteralUtil::CreateR1(input_data); + std::unique_ptr<Literal> input_literal = Literal::CreateR1(input_data); std::unique_ptr<GlobalData> input_global_data = client_->TransferToServer(*input_literal).ConsumeValueOrDie(); @@ -156,9 +156,9 @@ class ReduceTest : public ClientLibraryTestBase { Array2D<float> input_data(rows, cols); input_data.FillRandom(3.14f, 0.04); std::unique_ptr<Literal> input_literal = - LiteralUtil::CreateR2FromArray2D(input_data); - input_literal = LiteralUtil::Relayout( - *input_literal, LayoutUtil::MakeLayout({minor, major})); + Literal::CreateR2FromArray2D(input_data); + input_literal = + input_literal->Relayout(LayoutUtil::MakeLayout({minor, major})); std::unique_ptr<GlobalData> input_global_data = client_->TransferToServer(*input_literal).ConsumeValueOrDie(); @@ -184,9 +184,9 @@ class ReduceTest : public ClientLibraryTestBase { Array2D<float> input_data(rows, cols); input_data.FillRandom(3.14f, 0.04); std::unique_ptr<Literal> input_literal = - LiteralUtil::CreateR2FromArray2D(input_data); - input_literal = LiteralUtil::Relayout( - *input_literal, LayoutUtil::MakeLayout({minor, major})); + Literal::CreateR2FromArray2D(input_data); + input_literal = + input_literal->Relayout(LayoutUtil::MakeLayout({minor, major})); std::unique_ptr<GlobalData> input_global_data = client_->TransferToServer(*input_literal).ConsumeValueOrDie(); @@ -305,9 +305,8 @@ XLA_TEST_F(ReduceTest, ReduceElementwiseR2_111x50_To_R1) { Array2D<float> input_data(rows, cols); input_data.FillRandom(3.14f, 0.04); std::unique_ptr<Literal> input_literal = - LiteralUtil::CreateR2FromArray2D(input_data); - input_literal = - LiteralUtil::Relayout(*input_literal, LayoutUtil::MakeLayout({0, 1})); + Literal::CreateR2FromArray2D(input_data); + input_literal = input_literal->Relayout(LayoutUtil::MakeLayout({0, 1})); std::unique_ptr<GlobalData> input_global_data = client_->TransferToServer(*input_literal).ConsumeValueOrDie(); @@ -338,9 +337,8 @@ XLA_TEST_F(ReduceTest, TransposeAndReduceElementwiseR2_111x50_To_R1) { Array2D<float> input_data(rows, cols); input_data.FillRandom(3.14f, 0.04); std::unique_ptr<Literal> input_literal = - LiteralUtil::CreateR2FromArray2D(input_data); - input_literal = - LiteralUtil::Relayout(*input_literal, LayoutUtil::MakeLayout({0, 1})); + Literal::CreateR2FromArray2D(input_data); + input_literal = input_literal->Relayout(LayoutUtil::MakeLayout({0, 1})); std::unique_ptr<GlobalData> input_global_data = client_->TransferToServer(*input_literal).ConsumeValueOrDie(); @@ -371,7 +369,7 @@ XLA_TEST_F(ReduceTest, Reshape_111x2x25Reduce_111x50_To_R1) { Array3D<float> input_data(rows, 2, cols / 2); input_data.FillRandom(3.14f, 0.04); std::unique_ptr<Literal> 
input_literal = - LiteralUtil::CreateR3FromArray3D(input_data); + Literal::CreateR3FromArray3D(input_data); std::unique_ptr<GlobalData> input_global_data = client_->TransferToServer(*input_literal).ConsumeValueOrDie(); @@ -434,7 +432,7 @@ XLA_TEST_F(ReduceTest, MaxReduce2DToR0) { auto max = CreateScalarMaxComputation(F32, &builder); Array2D<float> input(300, 250); input.FillRandom(214.0f); - auto input_literal = LiteralUtil::CreateR2FromArray2D(input); + auto input_literal = Literal::CreateR2FromArray2D(input); builder.Reduce(builder.ConstantLiteral(*input_literal), builder.ConstantR0<float>(FLT_MIN), max, {0, 1}); auto input_max = FLT_MIN; @@ -449,7 +447,7 @@ XLA_TEST_F(ReduceTest, MinReduce2DToR0) { auto min = CreateScalarMinComputation(F32, &builder); Array2D<float> input(150, 130); input.FillRandom(214.0f); - auto input_literal = LiteralUtil::CreateR2FromArray2D(input); + auto input_literal = Literal::CreateR2FromArray2D(input); builder.Reduce(builder.ConstantLiteral(*input_literal), builder.ConstantR0<float>(FLT_MAX), min, {0, 1}); @@ -579,9 +577,9 @@ XLA_TEST_P(ReduceR3ToR2Test, ReduceR3ToR2) { Array3D<float> input_array(bounds[0], bounds[1], bounds[2]); input_array.FillRandom(3.14f, 0.05); - auto input_literal = LiteralUtil::CreateR3FromArray3D(input_array); - input_literal = LiteralUtil::Relayout( - *input_literal, LayoutUtil::MakeLayout(GetParam().layout)); + auto input_literal = Literal::CreateR3FromArray3D(input_array); + input_literal = + input_literal->Relayout(LayoutUtil::MakeLayout(GetParam().layout)); std::unique_ptr<GlobalData> input_data = client_->TransferToServer(*input_literal).ConsumeValueOrDie(); diff --git a/tensorflow/compiler/xla/tests/reduce_window_test.cc b/tensorflow/compiler/xla/tests/reduce_window_test.cc index ee3a837e16..6b4bceb437 100644 --- a/tensorflow/compiler/xla/tests/reduce_window_test.cc +++ b/tensorflow/compiler/xla/tests/reduce_window_test.cc @@ -57,7 +57,7 @@ class ReduceWindowTest : public ClientLibraryTestBase { tensorflow::gtl::ArraySlice<int64> window_strides, Padding padding) { builder_.ReduceWindow( - input, builder_.ConstantLiteral(LiteralUtil::MinValue(F32)), + input, builder_.ConstantLiteral(Literal::MinValue(F32)), CreateScalarMax(), window_dimensions, window_strides, padding); } @@ -66,7 +66,7 @@ class ReduceWindowTest : public ClientLibraryTestBase { tensorflow::gtl::ArraySlice<int64> window_strides, Padding padding) { builder_.ReduceWindow(input, - builder_.ConstantLiteral(LiteralUtil::MaxValue(F32)), + builder_.ConstantLiteral(Literal::MaxValue(F32)), CreateScalarMinComputation(F32, &builder_), window_dimensions, window_strides, padding); } diff --git a/tensorflow/compiler/xla/tests/replay_test.cc b/tensorflow/compiler/xla/tests/replay_test.cc index 4dea470477..cb7f54ea01 100644 --- a/tensorflow/compiler/xla/tests/replay_test.cc +++ b/tensorflow/compiler/xla/tests/replay_test.cc @@ -92,10 +92,10 @@ XLA_TEST_F(ReplayTest, XPlusYReplayWithParameters) { // Run it. 
std::unique_ptr<GlobalData> x_data = - client_->TransferToServer(*LiteralUtil::CreateR0<int32>(2)) + client_->TransferToServer(*Literal::CreateR0<int32>(2)) .ConsumeValueOrDie(); std::unique_ptr<GlobalData> y_data = - client_->TransferToServer(*LiteralUtil::CreateR0<int32>(3)) + client_->TransferToServer(*Literal::CreateR0<int32>(3)) .ConsumeValueOrDie(); std::unique_ptr<Literal> literal = client_ diff --git a/tensorflow/compiler/xla/tests/reshape_test.cc b/tensorflow/compiler/xla/tests/reshape_test.cc index 096ba8deb4..6748d196c1 100644 --- a/tensorflow/compiler/xla/tests/reshape_test.cc +++ b/tensorflow/compiler/xla/tests/reshape_test.cc @@ -70,7 +70,7 @@ XLA_TEST_F(ReshapeTest, SingleElementArrayToScalar) { XLA_TEST_F(ReshapeTest, ScalarToSingleElementArray) { ComputationBuilder builder(client_, TestName()); - std::unique_ptr<Literal> param0_literal = LiteralUtil::CreateR0<float>(1.0f); + std::unique_ptr<Literal> param0_literal = Literal::CreateR0<float>(1.0f); std::unique_ptr<GlobalData> param0_data = client_->TransferToServer(*param0_literal).ConsumeValueOrDie(); @@ -98,7 +98,7 @@ XLA_TEST_F(ReshapeTest, DISABLED_ON_GPU(Trivial0x3WithParameter)) { ComputationBuilder builder(client_, TestName()); std::unique_ptr<Literal> param0_literal = - LiteralUtil::CreateR2FromArray2D<float>(Array2D<float>(0, 3)); + Literal::CreateR2FromArray2D<float>(Array2D<float>(0, 3)); std::unique_ptr<GlobalData> param0_data = client_->TransferToServer(*param0_literal).ConsumeValueOrDie(); @@ -402,7 +402,7 @@ XLA_TEST_F(ReshapeTest, FullyConnectedCollapseDesugared) { XLA_TEST_F(ReshapeTest, ToScalar) { for (int rank = 0; rank < 8; ++rank) { ComputationBuilder b(client_, TestName()); - auto input = LiteralUtil::CreateR1<float>({83.0f}); + auto input = Literal::CreateR1<float>({83.0f}); std::vector<int64> ones(rank, 1); // this is {1, ..., 1}. std::vector<int64> dimensions(rank); std::iota(dimensions.begin(), dimensions.end(), 0); @@ -434,7 +434,7 @@ XLA_TEST_F(ReshapeTest, R4Dim0MinorLayoutToR2Dim0MajorLayout) { builder.Reshape(a, /*dimensions=*/{0, 1, 2, 3}, /*new_sizes=*/{2, 8}); // clang-format off - auto literal = LiteralUtil::CreateR4FromArray4DWithLayout(Array4D<float>{ + auto literal = Literal::CreateR4FromArray4DWithLayout(Array4D<float>{ { { {0, 1}, @@ -474,12 +474,12 @@ XLA_TEST_F(ReshapeTest, R4Dim0MinorLayoutToR2Dim0MajorLayout) { ->ExecuteAndTransfer(computation, {input.get()}, &execution_options) .ConsumeValueOrDie(); std::unique_ptr<Literal> expected = - LiteralUtil::CreateR2FromArray2D<float>(expected_array); + Literal::CreateR2FromArray2D<float>(expected_array); LiteralTestUtil::ExpectEqual(*expected, *actual); } XLA_TEST_F(ReshapeTest, R2ToR4_3x8_To_3x2x1x4) { - std::unique_ptr<Literal> input = LiteralUtil::CreateR2<float>({ + std::unique_ptr<Literal> input = Literal::CreateR2<float>({ {0, 1, 2, 3, 4, 5, 6, 7}, {100, 101, 102, 103, 104, 105, 106, 107}, {200, 201, 202, 203, 204, 205, 206, 207}, @@ -507,7 +507,7 @@ XLA_TEST_F(ReshapeTest, R2ToR4_3x8_To_3x2x1x4) { // Tests R2->R4 reshape with the reshape dimensions {1, 0}. 
XLA_TEST_F(ReshapeTest, R2ToR4_3x8_To_3x2x1x4_Dimensions_10) { - std::unique_ptr<Literal> input = LiteralUtil::CreateR2<float>({ + std::unique_ptr<Literal> input = Literal::CreateR2<float>({ {0, 1, 2, 3, 4, 5, 6, 7}, {100, 101, 102, 103, 104, 105, 106, 107}, {200, 201, 202, 203, 204, 205, 206, 207}, @@ -541,7 +541,7 @@ XLA_TEST_F(ReshapeTest, R4ToR2_2x1x1x1_To_2x1) { [&rng, &distribution](tensorflow::gtl::ArraySlice<int64> /* indices */, float* cell) { *cell = distribution(rng); }); std::unique_ptr<Literal> input_literal = - LiteralUtil::CreateR4FromArray4DWithLayout( + Literal::CreateR4FromArray4DWithLayout( input, LayoutUtil::MakeLayout({3, 2, 1, 0})); std::unique_ptr<GlobalData> input_data = client_->TransferToServer(*input_literal).ConsumeValueOrDie(); @@ -564,7 +564,7 @@ XLA_TEST_F(ReshapeTest, R4ToR2_2x1x4x1_To_4x2) { [&rng, &distribution](tensorflow::gtl::ArraySlice<int64> /* indices */, float* cell) { *cell = distribution(rng); }); std::unique_ptr<Literal> input_literal = - LiteralUtil::CreateR4FromArray4DWithLayout( + Literal::CreateR4FromArray4DWithLayout( input, LayoutUtil::MakeLayout({3, 2, 1, 0})); std::unique_ptr<GlobalData> input_data = client_->TransferToServer(*input_literal).ConsumeValueOrDie(); @@ -588,7 +588,7 @@ XLA_TEST_F(ReshapeTest, R4ToR2_5x10x2x3_To_5x60_Dimensions_0213) { [&rng, &distribution](tensorflow::gtl::ArraySlice<int64> /* indices */, float* cell) { *cell = distribution(rng); }); std::unique_ptr<Literal> input_literal = - LiteralUtil::CreateR4FromArray4DWithLayout( + Literal::CreateR4FromArray4DWithLayout( input, LayoutUtil::MakeLayout({3, 2, 1, 0})); std::unique_ptr<GlobalData> input_data = client_->TransferToServer(*input_literal).ConsumeValueOrDie(); @@ -602,7 +602,7 @@ XLA_TEST_F(ReshapeTest, R4ToR2_5x10x2x3_To_5x60_Dimensions_0213) { expected_array(indices[0], indices[2] * 30 + indices[1] * 3 + indices[3]) = *cell; }); - auto expected = LiteralUtil::CreateR2FromArray2D(expected_array); + auto expected = Literal::CreateR2FromArray2D(expected_array); ComputeAndCompareLiteral(&builder, *expected, {input_data.get()}); } @@ -614,7 +614,7 @@ XLA_TEST_F(ReshapeTest, NoopReshape) { [&rng, &distribution](tensorflow::gtl::ArraySlice<int64> /* indices */, float* cell) { *cell = distribution(rng); }); std::unique_ptr<Literal> input_literal = - LiteralUtil::CreateR4FromArray4DWithLayout( + Literal::CreateR4FromArray4DWithLayout( input_array, LayoutUtil::MakeLayout({1, 2, 3, 0})); std::unique_ptr<GlobalData> input_data = client_->TransferToServer(*input_literal).ConsumeValueOrDie(); @@ -641,7 +641,7 @@ XLA_TEST_F(ReshapeTest, NoopReshape) { } XLA_TEST_F(ReshapeTest, R4ToR4Reshape_Trivial) { - auto literal_1x2x3x4 = LiteralUtil::CreateR4( + auto literal_1x2x3x4 = Literal::CreateR4( {{{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}}, {{13, 14, 15, 16}, {17, 18, 19, 20}, {21, 22, 23, 24}}}}); @@ -654,7 +654,7 @@ XLA_TEST_F(ReshapeTest, R4ToR4Reshape_Trivial) { } XLA_TEST_F(ReshapeTest, R4ToR4Reshape) { - auto literal_1x2x3x4 = LiteralUtil::CreateR4( + auto literal_1x2x3x4 = Literal::CreateR4( {{{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}}, {{13, 14, 15, 16}, {17, 18, 19, 20}, {21, 22, 23, 24}}}}); @@ -664,7 +664,7 @@ XLA_TEST_F(ReshapeTest, R4ToR4Reshape) { /*new_sizes=*/{2, 4, 3, 1}); // clang-format off - auto expected_2x4x3x1 = LiteralUtil::CreateR4( + auto expected_2x4x3x1 = Literal::CreateR4( {{{{1}, {5}, {9}}, {{2}, {6}, {10}}, {{3}, {7}, {11}}, @@ -688,7 +688,7 @@ XLA_TEST_F(ReshapeTest, R4TwoMinorTransposeSimple) { [&rng, 
&distribution](tensorflow::gtl::ArraySlice<int64> /* indices */, float* cell) { *cell = distribution(rng); }); std::unique_ptr<Literal> input_literal = - LiteralUtil::CreateR4FromArray4DWithLayout( + Literal::CreateR4FromArray4DWithLayout( input, LayoutUtil::MakeLayout({3, 2, 1, 0})); std::unique_ptr<GlobalData> input_data = client_->TransferToServer(*input_literal).ConsumeValueOrDie(); @@ -697,9 +697,9 @@ XLA_TEST_F(ReshapeTest, R4TwoMinorTransposeSimple) { auto a = builder.Parameter(0, input_literal->shape(), "a"); builder.Reshape(a, /*dimensions=*/{0, 1, 3, 2}, /*new_sizes=*/new_bounds); - std::unique_ptr<Literal> expected = LiteralUtil::Relayout( - *LiteralTestUtil::Reshape(new_bounds, {2, 3, 1, 0}, *input_literal), - LayoutUtil::MakeLayout({3, 2, 1, 0})); + std::unique_ptr<Literal> expected = + LiteralTestUtil::Reshape(new_bounds, {2, 3, 1, 0}, *input_literal) + ->Relayout(LayoutUtil::MakeLayout({3, 2, 1, 0})); // Specify the requested output shape explicitly to ensure that this reshape // actually corresponds to a two minor transpose. @@ -717,7 +717,7 @@ XLA_TEST_F(ReshapeTest, R4TwoMinorTransposeMajorFirstEffectiveR2) { [&rng, &distribution](tensorflow::gtl::ArraySlice<int64> /* indices */, float* cell) { *cell = distribution(rng); }); std::unique_ptr<Literal> input_literal = - LiteralUtil::CreateR4FromArray4DWithLayout( + Literal::CreateR4FromArray4DWithLayout( input, LayoutUtil::MakeLayout({3, 2, 1, 0})); std::unique_ptr<GlobalData> input_data = client_->TransferToServer(*input_literal).ConsumeValueOrDie(); @@ -726,9 +726,9 @@ XLA_TEST_F(ReshapeTest, R4TwoMinorTransposeMajorFirstEffectiveR2) { auto a = builder.Parameter(0, input_literal->shape(), "a"); builder.Reshape(a, /*dimensions=*/{0, 1, 3, 2}, /*new_sizes=*/new_bounds); - std::unique_ptr<Literal> expected = LiteralUtil::Relayout( - *LiteralTestUtil::Reshape(new_bounds, {2, 3, 1, 0}, *input_literal), - LayoutUtil::MakeLayout({3, 2, 1, 0})); + std::unique_ptr<Literal> expected = + LiteralTestUtil::Reshape(new_bounds, {2, 3, 1, 0}, *input_literal) + ->Relayout(LayoutUtil::MakeLayout({3, 2, 1, 0})); // Specify the requested output shape explicitly to ensure that this reshape // actually corresponds to a two minor transpose. @@ -746,7 +746,7 @@ XLA_TEST_F(ReshapeTest, R4TwoMinorTransposeMajorFirstMinorEffectiveR1) { [&rng, &distribution](tensorflow::gtl::ArraySlice<int64> /* indices */, float* cell) { *cell = distribution(rng); }); std::unique_ptr<Literal> input_literal = - LiteralUtil::CreateR4FromArray4DWithLayout( + Literal::CreateR4FromArray4DWithLayout( input, LayoutUtil::MakeLayout({3, 2, 1, 0})); std::unique_ptr<GlobalData> input_data = client_->TransferToServer(*input_literal).ConsumeValueOrDie(); @@ -755,9 +755,9 @@ XLA_TEST_F(ReshapeTest, R4TwoMinorTransposeMajorFirstMinorEffectiveR1) { auto a = builder.Parameter(0, input_literal->shape(), "a"); builder.Reshape(a, /*dimensions=*/{0, 1, 3, 2}, /*new_sizes=*/new_bounds); - std::unique_ptr<Literal> expected = LiteralUtil::Relayout( - *LiteralTestUtil::Reshape(new_bounds, {2, 3, 1, 0}, *input_literal), - LayoutUtil::MakeLayout({3, 2, 1, 0})); + std::unique_ptr<Literal> expected = + LiteralTestUtil::Reshape(new_bounds, {2, 3, 1, 0}, *input_literal) + ->Relayout(LayoutUtil::MakeLayout({3, 2, 1, 0})); // Specify the requested output shape explicitly to ensure that this reshape // actually corresponds to a two minor transpose. 
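The reshape_test.cc hunks above all follow the same mechanical rewrite: the static LiteralUtil::Relayout(literal, layout) helper becomes the instance method literal->Relayout(layout), and the CreateR*FromArray*WithLayout factories move onto Literal itself. A minimal sketch of the post-change usage, assuming the header paths and xla namespace used elsewhere in this diff (the helper name is illustrative only, not part of the commit):

// Sketch of the post-change layout-aware Literal construction.
// Header paths and the helper name are assumptions for illustration.
#include <memory>

#include "tensorflow/compiler/xla/array4d.h"
#include "tensorflow/compiler/xla/layout_util.h"
#include "tensorflow/compiler/xla/literal_util.h"

namespace xla {

std::unique_ptr<Literal> MakeRelaidOutLiteral() {
  // Build a small R4 array and wrap it in a Literal with an explicit
  // minor-to-major layout, using the factory that now lives on Literal.
  Array4D<float> input(2, 3, 1, 4);
  input.FillWithMultiples(1.0f);
  std::unique_ptr<Literal> literal = Literal::CreateR4FromArray4DWithLayout(
      input, LayoutUtil::MakeLayout({3, 2, 1, 0}));

  // Relayout is now an instance method, replacing the old
  // LiteralUtil::Relayout(*literal, layout) static call.
  return literal->Relayout(LayoutUtil::MakeLayout({0, 1, 2, 3}));
}

}  // namespace xla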
@@ -776,7 +776,7 @@ XLA_TEST_F(ReshapeTest, R4TwoMinorTransposeMajorFirstMinorEffectiveR1InR2) { [&rng, &distribution](tensorflow::gtl::ArraySlice<int64> /* indices */, float* cell) { *cell = distribution(rng); }); std::unique_ptr<Literal> input_literal = - LiteralUtil::CreateR4FromArray4DWithLayout( + Literal::CreateR4FromArray4DWithLayout( input, LayoutUtil::MakeLayout({3, 2, 1, 0})); std::unique_ptr<GlobalData> input_data = client_->TransferToServer(*input_literal).ConsumeValueOrDie(); @@ -785,9 +785,9 @@ XLA_TEST_F(ReshapeTest, R4TwoMinorTransposeMajorFirstMinorEffectiveR1InR2) { auto a = builder.Parameter(0, input_literal->shape(), "a"); builder.Reshape(a, /*dimensions=*/{0, 1, 3, 2}, /*new_sizes=*/new_bounds); - std::unique_ptr<Literal> expected = LiteralUtil::Relayout( - *LiteralTestUtil::Reshape(new_bounds, {2, 3, 1, 0}, *input_literal), - LayoutUtil::MakeLayout({3, 2, 1, 0})); + std::unique_ptr<Literal> expected = + LiteralTestUtil::Reshape(new_bounds, {2, 3, 1, 0}, *input_literal) + ->Relayout(LayoutUtil::MakeLayout({3, 2, 1, 0})); // Specify the requested output shape explicitly to ensure that this reshape // actually corresponds to a two minor transpose. @@ -805,7 +805,7 @@ XLA_TEST_F(ReshapeTest, R4TwoMinorTransposeTrivialR2) { [&rng, &distribution](tensorflow::gtl::ArraySlice<int64> /* indices */, float* cell) { *cell = distribution(rng); }); std::unique_ptr<Literal> input_literal = - LiteralUtil::CreateR4FromArray4DWithLayout( + Literal::CreateR4FromArray4DWithLayout( input, LayoutUtil::MakeLayout({0, 1, 2, 3})); std::unique_ptr<GlobalData> input_data = client_->TransferToServer(*input_literal).ConsumeValueOrDie(); @@ -814,9 +814,9 @@ XLA_TEST_F(ReshapeTest, R4TwoMinorTransposeTrivialR2) { auto a = builder.Parameter(0, input_literal->shape(), "a"); builder.Reshape(a, /*dimensions=*/{1, 0, 2, 3}, /*new_sizes=*/new_bounds); - std::unique_ptr<Literal> expected = LiteralUtil::Relayout( - *LiteralTestUtil::Reshape(new_bounds, {1, 0, 2, 3}, *input_literal), - input_literal->shape().layout()); + std::unique_ptr<Literal> expected = + LiteralTestUtil::Reshape(new_bounds, {1, 0, 2, 3}, *input_literal) + ->Relayout(input_literal->shape().layout()); // Specify the requested output shape explicitly to ensure that this reshape // actually corresponds to a two minor transpose. 
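The remaining hunks below (round_trip_*, scalar_computations, tuple, while, text_literal_* tests and the tools) repeat the same pattern for the other former LiteralUtil entry points: the CreateR0/R1/R2 factories and MakeTuple become static members of Literal, while Get, Set, PopulateWithValue, EachCellAsString and ToString become instance methods. A short sketch of the new call shapes, assuming the same header path as above (the function name is illustrative only, not part of the commit):

// Sketch of the post-change element access, tuple construction and printing.
// Header path and function name are assumptions for illustration.
#include <cstdio>
#include <memory>

#include "tensorflow/compiler/xla/literal_util.h"

namespace xla {

void LiteralInstanceApiExample() {
  // Factories are now static members of Literal instead of LiteralUtil.
  std::unique_ptr<Literal> matrix =
      Literal::CreateR2<float>({{42.0f, 24.0f}, {64.0f, 46.0f}});

  // Element access moves from LiteralUtil::Get<float>(*matrix, {i, j})
  // to an instance method on the literal itself.
  float top_left = matrix->Get<float>({0, 0});  // 42.0f

  // Tuples are likewise built through Literal::MakeTuple.
  std::unique_ptr<Literal> scalar = Literal::CreateR0<float>(top_left);
  std::unique_ptr<Literal> tuple =
      Literal::MakeTuple({scalar.get(), matrix.get()});

  // ToString is now an instance method as well, replacing
  // LiteralUtil::ToString(*tuple).
  fprintf(stderr, "%s\n", tuple->ToString().c_str());
}

}  // namespace xla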
diff --git a/tensorflow/compiler/xla/tests/round_trip_packed_literal_test.cc b/tensorflow/compiler/xla/tests/round_trip_packed_literal_test.cc index 54d1d76ec9..5b4c05c673 100644 --- a/tensorflow/compiler/xla/tests/round_trip_packed_literal_test.cc +++ b/tensorflow/compiler/xla/tests/round_trip_packed_literal_test.cc @@ -65,8 +65,8 @@ TEST_F(RoundTripPackedLiteralTest, RoundTripsR1F32Length2) { reader.Read(ShapeUtil::MakeShape(F32, {2})).ConsumeValueOrDie(); EXPECT_TRUE(reader.IsExhausted()); - EXPECT_EQ(42.0, LiteralUtil::Get<float>(*actual, {0})); - EXPECT_EQ(24.0, LiteralUtil::Get<float>(*actual, {1})); + EXPECT_EQ(42.0, actual->Get<float>({0})); + EXPECT_EQ(24.0, actual->Get<float>({1})); } TEST_F(RoundTripPackedLiteralTest, RoundTripsR2F32Size2x2Dim0Minor) { @@ -95,10 +95,10 @@ TEST_F(RoundTripPackedLiteralTest, RoundTripsR2F32Size2x2Dim0Minor) { .ConsumeValueOrDie(); EXPECT_TRUE(reader.IsExhausted()); - EXPECT_EQ(42.0f, LiteralUtil::Get<float>(*actual, {0, 0})); - EXPECT_EQ(24.0f, LiteralUtil::Get<float>(*actual, {0, 1})); - EXPECT_EQ(64.0f, LiteralUtil::Get<float>(*actual, {1, 0})); - EXPECT_EQ(46.0f, LiteralUtil::Get<float>(*actual, {1, 1})); + EXPECT_EQ(42.0f, actual->Get<float>({0, 0})); + EXPECT_EQ(24.0f, actual->Get<float>({0, 1})); + EXPECT_EQ(64.0f, actual->Get<float>({1, 0})); + EXPECT_EQ(46.0f, actual->Get<float>({1, 1})); std::unique_ptr<Literal> round_tripped = RoundTripToServer(*actual); LiteralTestUtil::ExpectEqual(*round_tripped, *actual); @@ -130,10 +130,10 @@ TEST_F(RoundTripPackedLiteralTest, RoundTripsR2F32Size2x2Dim1Minor) { .ConsumeValueOrDie(); EXPECT_TRUE(reader.IsExhausted()); - EXPECT_EQ(42.0f, LiteralUtil::Get<float>(*actual, {0, 0})); - EXPECT_EQ(24.0f, LiteralUtil::Get<float>(*actual, {1, 0})); - EXPECT_EQ(64.0f, LiteralUtil::Get<float>(*actual, {0, 1})); - EXPECT_EQ(46.0f, LiteralUtil::Get<float>(*actual, {1, 1})); + EXPECT_EQ(42.0f, actual->Get<float>({0, 0})); + EXPECT_EQ(24.0f, actual->Get<float>({1, 0})); + EXPECT_EQ(64.0f, actual->Get<float>({0, 1})); + EXPECT_EQ(46.0f, actual->Get<float>({1, 1})); std::unique_ptr<Literal> round_tripped = RoundTripToServer(*actual); LiteralTestUtil::ExpectEqual(*round_tripped, *actual); diff --git a/tensorflow/compiler/xla/tests/round_trip_transfer_test.cc b/tensorflow/compiler/xla/tests/round_trip_transfer_test.cc index 71af695ae3..e6a6b7b37a 100644 --- a/tensorflow/compiler/xla/tests/round_trip_transfer_test.cc +++ b/tensorflow/compiler/xla/tests/round_trip_transfer_test.cc @@ -47,62 +47,61 @@ class RoundTripTransferTest : public ClientLibraryTestBase { }; TEST_F(RoundTripTransferTest, R0S32) { - RoundTripTest(*LiteralUtil::CreateR0<int32>(42)); + RoundTripTest(*Literal::CreateR0<int32>(42)); } TEST_F(RoundTripTransferTest, R0F32) { - RoundTripTest(*LiteralUtil::CreateR0<float>(42.0)); + RoundTripTest(*Literal::CreateR0<float>(42.0)); } TEST_F(RoundTripTransferTest, R1F32_Len0) { - RoundTripTest(*LiteralUtil::CreateR1<float>({})); + RoundTripTest(*Literal::CreateR1<float>({})); } TEST_F(RoundTripTransferTest, R1F32_Len2) { - RoundTripTest(*LiteralUtil::CreateR1<float>({42.0, 64.0})); + RoundTripTest(*Literal::CreateR1<float>({42.0, 64.0})); } TEST_F(RoundTripTransferTest, R1F32_Len256) { std::vector<float> values(256); std::iota(values.begin(), values.end(), 1.0); - RoundTripTest(*LiteralUtil::CreateR1<float>(values)); + RoundTripTest(*Literal::CreateR1<float>(values)); } TEST_F(RoundTripTransferTest, R1F32_Len1024) { std::vector<float> values(1024); std::iota(values.begin(), values.end(), 1.0); - 
RoundTripTest(*LiteralUtil::CreateR1<float>(values)); + RoundTripTest(*Literal::CreateR1<float>(values)); } TEST_F(RoundTripTransferTest, R1F32_Len1025) { std::vector<float> values(1025); std::iota(values.begin(), values.end(), 1.0); - RoundTripTest(*LiteralUtil::CreateR1<float>(values)); + RoundTripTest(*Literal::CreateR1<float>(values)); } TEST_F(RoundTripTransferTest, R1F32_Len4096) { std::vector<float> values(4096); std::iota(values.begin(), values.end(), 1.0); - RoundTripTest(*LiteralUtil::CreateR1<float>(values)); + RoundTripTest(*Literal::CreateR1<float>(values)); } TEST_F(RoundTripTransferTest, R2F32_Len10x0) { - RoundTripTest( - *LiteralUtil::CreateR2FromArray2D<float>(Array2D<float>(10, 0))); + RoundTripTest(*Literal::CreateR2FromArray2D<float>(Array2D<float>(10, 0))); } TEST_F(RoundTripTransferTest, R2F32_Len2x2) { - RoundTripTest(*LiteralUtil::CreateR2<float>({{42.0, 64.0}, {77.0, 88.0}})); + RoundTripTest(*Literal::CreateR2<float>({{42.0, 64.0}, {77.0, 88.0}})); } TEST_F(RoundTripTransferTest, R3F32) { RoundTripTest( - *LiteralUtil::CreateR3<float>({{{1.0, 2.0}, {1.0, 2.0}, {1.0, 2.0}}, - {{3.0, 4.0}, {3.0, 4.0}, {3.0, 4.0}}})); + *Literal::CreateR3<float>({{{1.0, 2.0}, {1.0, 2.0}, {1.0, 2.0}}, + {{3.0, 4.0}, {3.0, 4.0}, {3.0, 4.0}}})); } TEST_F(RoundTripTransferTest, R4F32) { - RoundTripTest(*LiteralUtil::CreateR4<float>({{ + RoundTripTest(*Literal::CreateR4<float>({{ {{10, 11, 12, 13}, {14, 15, 16, 17}}, {{18, 19, 20, 21}, {22, 23, 24, 25}}, {{26, 27, 28, 29}, {30, 31, 32, 33}}, @@ -110,36 +109,33 @@ TEST_F(RoundTripTransferTest, R4F32) { } TEST_F(RoundTripTransferTest, EmptyTuple) { - RoundTripTest(*LiteralUtil::MakeTuple({})); + RoundTripTest(*Literal::MakeTuple({})); } TEST_F(RoundTripTransferTest, TupleOfR1F32) { - RoundTripTest( - *LiteralUtil::MakeTuple({LiteralUtil::CreateR1<float>({1, 2}).get(), - LiteralUtil::CreateR1<float>({3, 4}).get()})); + RoundTripTest(*Literal::MakeTuple({Literal::CreateR1<float>({1, 2}).get(), + Literal::CreateR1<float>({3, 4}).get()})); } TEST_F(RoundTripTransferTest, TupleOfR1F32_Len0_Len2) { - RoundTripTest( - *LiteralUtil::MakeTuple({LiteralUtil::CreateR1<float>({}).get(), - LiteralUtil::CreateR1<float>({3, 4}).get()})); + RoundTripTest(*Literal::MakeTuple({Literal::CreateR1<float>({}).get(), + Literal::CreateR1<float>({3, 4}).get()})); } TEST_F(RoundTripTransferTest, TupleOfR0F32AndR1S32) { - RoundTripTest( - *LiteralUtil::MakeTuple({LiteralUtil::CreateR0<float>(1.0).get(), - LiteralUtil::CreateR1<int>({2, 3}).get()})); + RoundTripTest(*Literal::MakeTuple({Literal::CreateR0<float>(1.0).get(), + Literal::CreateR1<int>({2, 3}).get()})); } // Below two tests are added to identify the cost of large data transfers. 
TEST_F(RoundTripTransferTest, R2F32_Large) { - RoundTripTest(*LiteralUtil::CreateR2F32Linspace(-1.0f, 1.0f, 512, 512)); + RoundTripTest(*Literal::CreateR2F32Linspace(-1.0f, 1.0f, 512, 512)); } TEST_F(RoundTripTransferTest, R4F32_Large) { Array4D<float> array4d(2, 2, 256, 256); array4d.FillWithMultiples(1.0f); - RoundTripTest(*LiteralUtil::CreateR4FromArray4D<float>(array4d)); + RoundTripTest(*Literal::CreateR4FromArray4D<float>(array4d)); } } // namespace diff --git a/tensorflow/compiler/xla/tests/scalar_computations_test.cc b/tensorflow/compiler/xla/tests/scalar_computations_test.cc index 7b32bb93fd..07bd00f015 100644 --- a/tensorflow/compiler/xla/tests/scalar_computations_test.cc +++ b/tensorflow/compiler/xla/tests/scalar_computations_test.cc @@ -211,9 +211,9 @@ TEST_F(ScalarComputationsTest, MulThreeScalarsS32) { TEST_F(ScalarComputationsTest, MulThreeScalarsF32Params) { ComputationBuilder builder(client_, TestName()); - std::unique_ptr<Literal> a_literal = LiteralUtil::CreateR0<float>(2.1f); - std::unique_ptr<Literal> b_literal = LiteralUtil::CreateR0<float>(5.5f); - std::unique_ptr<Literal> c_literal = LiteralUtil::CreateR0<float>(0.5f); + std::unique_ptr<Literal> a_literal = Literal::CreateR0<float>(2.1f); + std::unique_ptr<Literal> b_literal = Literal::CreateR0<float>(5.5f); + std::unique_ptr<Literal> c_literal = Literal::CreateR0<float>(0.5f); std::unique_ptr<GlobalData> a_data = client_->TransferToServer(*a_literal).ConsumeValueOrDie(); @@ -360,8 +360,8 @@ TEST_F(ScalarComputationsTest, DivU32s) { for (uint32 divisor : vals) { if (divisor != 0) { for (uint32 dividend : vals) { - auto dividend_literal = LiteralUtil::CreateR0<uint32>(dividend); - auto divisor_literal = LiteralUtil::CreateR0<uint32>(divisor); + auto dividend_literal = Literal::CreateR0<uint32>(dividend); + auto divisor_literal = Literal::CreateR0<uint32>(divisor); TF_ASSIGN_OR_ASSERT_OK(auto dividend_data, client_->TransferToServer(*dividend_literal)); TF_ASSIGN_OR_ASSERT_OK(auto divisor_data, @@ -372,8 +372,7 @@ TEST_F(ScalarComputationsTest, DivU32s) { {dividend_data.get(), divisor_data.get()}, &execution_options_) .ConsumeValueOrDie(); - auto expected_literal = - LiteralUtil::CreateR0<uint32>(dividend / divisor); + auto expected_literal = Literal::CreateR0<uint32>(dividend / divisor); LiteralTestUtil::ExpectEqual(*expected_literal, *actual_literal); } } @@ -402,8 +401,8 @@ TEST_F(ScalarComputationsTest, RemU32s) { for (uint32 divisor : vals) { if (divisor != 0) { for (uint32 dividend : vals) { - auto dividend_literal = LiteralUtil::CreateR0<uint32>(dividend); - auto divisor_literal = LiteralUtil::CreateR0<uint32>(divisor); + auto dividend_literal = Literal::CreateR0<uint32>(dividend); + auto divisor_literal = Literal::CreateR0<uint32>(divisor); TF_ASSIGN_OR_ASSERT_OK(auto dividend_data, client_->TransferToServer(*dividend_literal)); TF_ASSIGN_OR_ASSERT_OK(auto divisor_data, @@ -414,8 +413,7 @@ TEST_F(ScalarComputationsTest, RemU32s) { {dividend_data.get(), divisor_data.get()}, &execution_options_) .ConsumeValueOrDie(); - auto expected_literal = - LiteralUtil::CreateR0<uint32>(dividend % divisor); + auto expected_literal = Literal::CreateR0<uint32>(dividend % divisor); LiteralTestUtil::ExpectEqual(*expected_literal, *actual_literal); } } @@ -427,7 +425,7 @@ TEST_F(ScalarComputationsTest, RemainderTwoScalarsNonConstDividendS32) { auto x = builder.Parameter(0, ShapeUtil::MakeShape(S32, {}), "x"); builder.Rem(x, builder.ConstantR0<int32>(80000)); - std::unique_ptr<Literal> literal = 
LiteralUtil::CreateR0<int32>(87919); + std::unique_ptr<Literal> literal = Literal::CreateR0<int32>(87919); TF_ASSIGN_OR_ASSERT_OK(auto input_data, client_->TransferToServer(*literal)); ComputeAndCompareR0<int32>(&builder, 7919, {input_data.get()}); } @@ -763,7 +761,7 @@ TEST_F(ScalarComputationsTest, ComplicatedArithmeticExpressionS32) { TEST_F(ScalarComputationsTest, SqrtF320) { ComputationBuilder builder(client_, TestName()); - Literal zero_literal = LiteralUtil::Zero(PrimitiveType::F32); + Literal zero_literal = Literal::Zero(PrimitiveType::F32); std::unique_ptr<GlobalData> zero_data = client_->TransferToServer(zero_literal).ConsumeValueOrDie(); diff --git a/tensorflow/compiler/xla/tests/test_utils.h b/tensorflow/compiler/xla/tests/test_utils.h index 6a23df4d3c..f3a522b05e 100644 --- a/tensorflow/compiler/xla/tests/test_utils.h +++ b/tensorflow/compiler/xla/tests/test_utils.h @@ -61,7 +61,7 @@ std::unique_ptr<Literal> CreateR2LiteralWithLayout( auto literal = MakeUnique<Literal>(); const int64 d0 = values.size(); const int64 d1 = values.begin()->size(); - LiteralUtil::PopulateWithValue<NativeT>(0, {d0, d1}, literal.get()); + literal.get()->PopulateWithValue<NativeT>(0, {d0, d1}); *literal->mutable_shape()->mutable_layout() = LayoutUtil::MakeLayout(minor_to_major); TF_CHECK_OK(ShapeUtil::ValidateShape(literal->shape())); @@ -70,7 +70,7 @@ std::unique_ptr<Literal> CreateR2LiteralWithLayout( for (auto inner_list : values) { int64 dim1 = 0; for (auto value : inner_list) { - LiteralUtil::Set(literal.get(), {dim0, dim1}, value); + literal.get()->Set({dim0, dim1}, value); ++dim1; } ++dim0; @@ -88,7 +88,7 @@ std::unique_ptr<Literal> CreateR3LiteralWithLayout( const int64 d0 = values.size(); const int64 d1 = values.begin()->size(); const int64 d2 = values.begin()->begin()->size(); - LiteralUtil::PopulateWithValue<NativeT>(0, {d0, d1, d2}, literal.get()); + literal.get()->PopulateWithValue<NativeT>(0, {d0, d1, d2}); *literal->mutable_shape()->mutable_layout() = LayoutUtil::MakeLayout(minor_to_major); TF_CHECK_OK(ShapeUtil::ValidateShape(literal->shape())); @@ -99,7 +99,7 @@ std::unique_ptr<Literal> CreateR3LiteralWithLayout( for (auto inner_inner_list : inner_list) { int64 dim2 = 0; for (auto value : inner_inner_list) { - LiteralUtil::Set(literal.get(), {dim0, dim1, dim2}, value); + literal.get()->Set({dim0, dim1, dim2}, value); ++dim2; } ++dim1; diff --git a/tensorflow/compiler/xla/tests/tuple_test.cc b/tensorflow/compiler/xla/tests/tuple_test.cc index e9d523a31e..a4d96646a1 100644 --- a/tensorflow/compiler/xla/tests/tuple_test.cc +++ b/tensorflow/compiler/xla/tests/tuple_test.cc @@ -53,10 +53,10 @@ XLA_TEST_F(TupleTest, TupleCreate) { builder.ConstantR1<float>(constant_vector), builder.ConstantR2<float>(constant_matrix)}); - auto expected = LiteralUtil::MakeTuple( - {LiteralUtil::CreateR0<float>(constant_scalar).get(), - LiteralUtil::CreateR1<float>(constant_vector).get(), - LiteralUtil::CreateR2<float>(constant_matrix).get()}); + auto expected = + Literal::MakeTuple({Literal::CreateR0<float>(constant_scalar).get(), + Literal::CreateR1<float>(constant_vector).get(), + Literal::CreateR2<float>(constant_matrix).get()}); ComputeAndCompareTuple(&builder, *expected, {}, error_spec_); } @@ -67,9 +67,8 @@ XLA_TEST_F(TupleTest, TupleCreateWithZeroElementEntry) { auto result = builder.Tuple( {builder.ConstantR0<float>(7.0), builder.ConstantR1<float>({})}); - auto expected = - LiteralUtil::MakeTuple({LiteralUtil::CreateR0<float>(7.0).get(), - LiteralUtil::CreateR1<float>({}).get()}); + auto expected = 
Literal::MakeTuple({Literal::CreateR0<float>(7.0).get(), + Literal::CreateR1<float>({}).get()}); ComputeAndCompareTuple(&builder, *expected, {}, error_spec_); } @@ -77,7 +76,7 @@ XLA_TEST_F(TupleTest, TupleCreateWithZeroElementEntry) { XLA_TEST_F(TupleTest, EmptyTupleCreate) { ComputationBuilder builder(client_, TestName()); auto result = builder.Tuple({}); - auto expected = LiteralUtil::MakeTuple({}); + auto expected = Literal::MakeTuple({}); ComputeAndCompareTuple(&builder, *expected, {}, error_spec_); } @@ -146,9 +145,9 @@ XLA_TEST_F(TupleTest, TupleGTEToTuple) { builder.ConstantR2<float>(constant_matrix)}); auto new_tuple = builder.Tuple({builder.GetTupleElement(tuple_data, 1), builder.GetTupleElement(tuple_data, 0)}); - auto expected = LiteralUtil::MakeTuple( - {LiteralUtil::CreateR2<float>(constant_matrix).get(), - LiteralUtil::CreateR1<float>(constant_vector).get()}); + auto expected = + Literal::MakeTuple({Literal::CreateR2<float>(constant_matrix).get(), + Literal::CreateR1<float>(constant_vector).get()}); ComputeAndCompareTuple(&builder, *expected, {}, error_spec_); } @@ -212,9 +211,8 @@ XLA_TEST_F(TupleTest, DISABLED_ON_CPU_PARALLEL(SelectBetweenTuplesOnFalse)) { auto select = builder.Select(builder.ConstantR0<bool>(false), tuple12, tuple21); - auto expected = - LiteralUtil::MakeTuple({LiteralUtil::CreateR1<float>(vec2).get(), - LiteralUtil::CreateR1<float>(vec1).get()}); + auto expected = Literal::MakeTuple({Literal::CreateR1<float>(vec2).get(), + Literal::CreateR1<float>(vec1).get()}); ComputeAndCompareTuple(&builder, *expected, {}, error_spec_); } @@ -258,9 +256,8 @@ XLA_TEST_F(TupleTest, DISABLED_ON_CPU_PARALLEL(SelectBetweenTuplesOnTrue)) { auto select = builder.Select(builder.ConstantR0<bool>(true), tuple12, tuple21); - auto expected = - LiteralUtil::MakeTuple({LiteralUtil::CreateR1<float>(vec1).get(), - LiteralUtil::CreateR1<float>(vec2).get()}); + auto expected = Literal::MakeTuple({Literal::CreateR1<float>(vec1).get(), + Literal::CreateR1<float>(vec2).get()}); ComputeAndCompareTuple(&builder, *expected, {}, error_spec_); } @@ -339,9 +336,8 @@ XLA_TEST_F(TupleTest, auto select = builder.Select(builder.ConstantR0<bool>(false), tuple12, tuple21); - auto expected = - LiteralUtil::MakeTuple({LiteralUtil::CreateR1<float>(vec2).get(), - LiteralUtil::CreateR1<float>(vec1).get()}); + auto expected = Literal::MakeTuple({Literal::CreateR1<float>(vec2).get(), + Literal::CreateR1<float>(vec1).get()}); ComputeAndCompareTuple(&builder, *expected, {}, error_spec_); } @@ -352,13 +348,13 @@ XLA_TEST_F(TupleTest, NestedTuples) { auto outer_tuple = builder.Tuple({inner_tuple, builder.ConstantR1<float>({22.0, 44.0})}); - auto expected_v1 = LiteralUtil::CreateR1<float>({1.0, 2.0}); - auto expected_s = LiteralUtil::CreateR0<float>(42.0); + auto expected_v1 = Literal::CreateR1<float>({1.0, 2.0}); + auto expected_s = Literal::CreateR0<float>(42.0); auto expected_inner_tuple = - LiteralUtil::MakeTuple({expected_v1.get(), expected_s.get()}); - auto expected_v2 = LiteralUtil::CreateR1<float>({22.0, 44.0}); + Literal::MakeTuple({expected_v1.get(), expected_s.get()}); + auto expected_v2 = Literal::CreateR1<float>({22.0, 44.0}); auto expected = - LiteralUtil::MakeTuple({expected_inner_tuple.get(), expected_v2.get()}); + Literal::MakeTuple({expected_inner_tuple.get(), expected_v2.get()}); ComputeAndCompareTuple(&builder, *expected, {}, error_spec_); } @@ -378,14 +374,14 @@ XLA_TEST_F(TupleTest, GetTupleElementOfNestedTuple) { std::unique_ptr<GlobalData> data = client_ - 
->TransferToServer(*LiteralUtil::MakeTuple({ - LiteralUtil::MakeTuple( + ->TransferToServer(*Literal::MakeTuple({ + Literal::MakeTuple( { - LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0}).get(), - LiteralUtil::CreateR1<float>({4.0, 5.0, 6.0}).get(), + Literal::CreateR1<float>({1.0, 2.0, 3.0}).get(), + Literal::CreateR1<float>({4.0, 5.0, 6.0}).get(), }) .get(), - LiteralUtil::CreateR1<float>({7.0, 8.0, 9.0}).get(), + Literal::CreateR1<float>({7.0, 8.0, 9.0}).get(), })) .ConsumeValueOrDie(); diff --git a/tensorflow/compiler/xla/tests/while_test.cc b/tensorflow/compiler/xla/tests/while_test.cc index 3aeeb29c1f..2df9197428 100644 --- a/tensorflow/compiler/xla/tests/while_test.cc +++ b/tensorflow/compiler/xla/tests/while_test.cc @@ -239,11 +239,11 @@ TEST_F(WhileTest, WhileWithTupleResult) { VLOG(2) << "while = " << ShapeUtil::HumanString( *builder.GetShape(result).ConsumeValueOrDie()); - auto expected_counter = LiteralUtil::CreateR0<int32>(5); - auto expected_data = LiteralUtil::CreateR1<float>( + auto expected_counter = Literal::CreateR0<int32>(5); + auto expected_data = Literal::CreateR1<float>( {5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f}); auto expected = - LiteralUtil::MakeTuple({expected_counter.get(), expected_data.get()}); + Literal::MakeTuple({expected_counter.get(), expected_data.get()}); VLOG(2) << "expected = " << ShapeUtil::HumanString(expected->shape()); ComputeAndCompareTuple(&builder, *expected, {}, ErrorSpec(0.0001)); } @@ -524,11 +524,11 @@ XLA_TEST_F(WhileTest, WhileWithDynamicUpdateSlice) { << ShapeUtil::HumanString( *builder.GetShape(result).ConsumeValueOrDie()); - auto expected_counter = LiteralUtil::CreateR0<int32>(5); - auto expected_data = LiteralUtil::CreateR1<float>( + auto expected_counter = Literal::CreateR0<int32>(5); + auto expected_data = Literal::CreateR1<float>( {1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f, 4.0f, 4.0f, 5.0f, 5.0f}); auto expected = - LiteralUtil::MakeTuple({expected_counter.get(), expected_data.get()}); + Literal::MakeTuple({expected_counter.get(), expected_data.get()}); VLOG(2) << "expected = " << ShapeUtil::HumanString(expected->shape()); ComputeAndCompareTuple(&builder, *expected, {}, ErrorSpec(0.0001)); } diff --git a/tensorflow/compiler/xla/text_literal_reader.cc b/tensorflow/compiler/xla/text_literal_reader.cc index 7876272467..afdc6726f1 100644 --- a/tensorflow/compiler/xla/text_literal_reader.cc +++ b/tensorflow/compiler/xla/text_literal_reader.cc @@ -104,8 +104,8 @@ StatusOr<std::unique_ptr<Literal>> TextLiteralReader::ReadAllLines() { auto result = MakeUnique<Literal>(); const float fill = std::numeric_limits<float>::quiet_NaN(); - LiteralUtil::PopulateWithValue<float>(fill, AsInt64Slice(shape.dimensions()), - result.get()); + result.get()->PopulateWithValue<float>(fill, + AsInt64Slice(shape.dimensions())); std::vector<tensorflow::StringPiece> pieces; std::vector<tensorflow::StringPiece> coordinates; std::vector<int64> coordinate_values; @@ -147,7 +147,7 @@ StatusOr<std::unique_ptr<Literal>> TextLiteralReader::ReadAllLines() { "\"%s\"", shape.dimensions_size(), coordinate_values.size(), line.c_str()); } - LiteralUtil::Set<float>(result.get(), coordinate_values, value); + result.get()->Set<float>(coordinate_values, value); } return std::move(result); } diff --git a/tensorflow/compiler/xla/text_literal_reader_test.cc b/tensorflow/compiler/xla/text_literal_reader_test.cc index a167d80f73..23070b6638 100644 --- a/tensorflow/compiler/xla/text_literal_reader_test.cc +++ b/tensorflow/compiler/xla/text_literal_reader_test.cc @@ -46,12 
+46,12 @@ TEST(TextLiteralReaderTest, ReadsR3File) { TextLiteralReader::ReadPath(fname).ConsumeValueOrDie(); EXPECT_TRUE( ShapeUtil::Equal(ShapeUtil::MakeShape(F32, {1, 2, 3}), literal->shape())); - EXPECT_EQ(42.5, LiteralUtil::Get<float>(*literal, {0, 0, 0})); - EXPECT_EQ(43.5, LiteralUtil::Get<float>(*literal, {0, 0, 1})); - EXPECT_EQ(44.5, LiteralUtil::Get<float>(*literal, {0, 0, 2})); - EXPECT_EQ(45.5, LiteralUtil::Get<float>(*literal, {0, 1, 0})); - EXPECT_EQ(46.5, LiteralUtil::Get<float>(*literal, {0, 1, 1})); - EXPECT_EQ(47.5, LiteralUtil::Get<float>(*literal, {0, 1, 2})); + EXPECT_EQ(42.5, literal->Get<float>({0, 0, 0})); + EXPECT_EQ(43.5, literal->Get<float>({0, 0, 1})); + EXPECT_EQ(44.5, literal->Get<float>({0, 0, 2})); + EXPECT_EQ(45.5, literal->Get<float>({0, 1, 0})); + EXPECT_EQ(46.5, literal->Get<float>({0, 1, 1})); + EXPECT_EQ(47.5, literal->Get<float>({0, 1, 2})); } } // namespace diff --git a/tensorflow/compiler/xla/text_literal_writer.cc b/tensorflow/compiler/xla/text_literal_writer.cc index a5097e41cb..3fee467594 100644 --- a/tensorflow/compiler/xla/text_literal_writer.cc +++ b/tensorflow/compiler/xla/text_literal_writer.cc @@ -45,9 +45,9 @@ namespace xla { tensorflow::Status status; tensorflow::WritableFile* f_ptr = f.get(); - LiteralUtil::EachCellAsString( - literal, [f_ptr, &status](tensorflow::gtl::ArraySlice<int64> indices, - const string& value) { + literal.EachCellAsString( + [f_ptr, &status](tensorflow::gtl::ArraySlice<int64> indices, + const string& value) { if (!status.ok()) { return; } diff --git a/tensorflow/compiler/xla/text_literal_writer_test.cc b/tensorflow/compiler/xla/text_literal_writer_test.cc index 177ae4ea03..70cf2fb1b8 100644 --- a/tensorflow/compiler/xla/text_literal_writer_test.cc +++ b/tensorflow/compiler/xla/text_literal_writer_test.cc @@ -30,7 +30,7 @@ namespace xla { namespace { TEST(TextLiteralWriterTest, WritesFloatLiteral) { - auto literal = LiteralUtil::CreateR2<float>({ + auto literal = Literal::CreateR2<float>({ {3.14, 2.17}, {1.23, 4.56}, }); string path = diff --git a/tensorflow/compiler/xla/tools/replay_computation.cc b/tensorflow/compiler/xla/tools/replay_computation.cc index 3a75bf6495..6228ca34c0 100644 --- a/tensorflow/compiler/xla/tools/replay_computation.cc +++ b/tensorflow/compiler/xla/tools/replay_computation.cc @@ -98,11 +98,11 @@ void RealMain(tensorflow::gtl::ArraySlice<char*> args, bool use_fake_data) { std::unique_ptr<Literal> result = result_status.ConsumeValueOrDie(); fprintf(stdout, "%s: %s :: %s:%s\n", arg, module.entry().name().c_str(), ShapeUtil::HumanString(result->shape()).c_str(), - LiteralUtil::ToString(*result).c_str()); + result->ToString().c_str()); if (module.has_result()) { fprintf(stdout, "was %s:%s\n", ShapeUtil::HumanString(module.result().shape()).c_str(), - LiteralUtil::ToString(Literal(module.result())).c_str()); + Literal(module.result()).ToString().c_str()); } } } diff --git a/tensorflow/compiler/xla/tools/show_literal.cc b/tensorflow/compiler/xla/tools/show_literal.cc index b6538f5de0..b50cb5e28e 100644 --- a/tensorflow/compiler/xla/tools/show_literal.cc +++ b/tensorflow/compiler/xla/tools/show_literal.cc @@ -42,5 +42,5 @@ int main(int argc, char **argv) { &literal_proto)); xla::Literal literal(literal_proto); LOG(INFO) << "literal: " << literal_proto.ShortDebugString(); - fprintf(stderr, "%s\n", xla::LiteralUtil::ToString(literal).c_str()); + fprintf(stderr, "%s\n", literal.ToString().c_str()); } diff --git a/tensorflow/compiler/xla/tools/show_text_literal.cc 
b/tensorflow/compiler/xla/tools/show_text_literal.cc index 2d983b407c..bbe9902aa1 100644 --- a/tensorflow/compiler/xla/tools/show_text_literal.cc +++ b/tensorflow/compiler/xla/tools/show_text_literal.cc @@ -40,7 +40,7 @@ int main(int argc, char **argv) { xla::TextLiteralReader::ReadPath(argv[1]).ConsumeValueOrDie(); LOG(INFO) << "literal: " << literal->ShortDebugString(); - fprintf(stderr, "%s\n", xla::LiteralUtil::ToString(*literal).c_str()); + fprintf(stderr, "%s\n", literal->ToString().c_str()); if (literal->shape().element_type() == xla::F32) { float min = *std::min_element(literal->f32s().begin(), literal->f32s().end()); |