author    A. Unique TensorFlower <gardener@tensorflow.org>  2017-05-30 10:52:47 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>   2017-05-30 10:56:18 -0700
commit    7280dafca161eb3413ea120d3dd07c63e5254e72 (patch)
tree      7e14a2994ce2fd68ce5e58d0dd0b5960cf1f93d9
parent    6c3b15915d7475aed4484e47361e7cd0871678f4 (diff)
Use "empty" member function to test for emptiness
PiperOrigin-RevId: 157483181
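
For illustration, a minimal standalone C++ sketch of the idiom this change applies throughout the tree (variable names here are invented for the example, not taken from the files touched below): prefer the container's empty() member over size() comparisons or comparing a string against "".

#include <string>
#include <vector>

int main() {
  std::vector<int> nodes;
  std::string tag;

  // Old style: emptiness expressed via size() or comparison with "".
  bool has_nodes_old = nodes.size() > 0;
  bool has_tag_old = tag != "";

  // New style: empty() states the intent directly and works uniformly
  // across standard containers and strings.
  bool has_nodes_new = !nodes.empty();
  bool has_tag_new = !tag.empty();

  return (has_nodes_old == has_nodes_new && has_tag_old == has_tag_new) ? 0 : 1;
}

Both forms are equivalent; empty() simply reads as a direct statement of the condition being tested, which is the whole substance of the diff below.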
-rw-r--r--  tensorflow/compiler/jit/graphcycles/graphcycles_test.cc            | 8
-rw-r--r--  tensorflow/compiler/xla/legacy_flags/layout_util_flags.cc          | 2
-rw-r--r--  tensorflow/compiler/xla/metric_table_report.cc                     | 4
-rw-r--r--  tensorflow/compiler/xla/service/shape_inference.cc                 | 4
-rw-r--r--  tensorflow/core/common_runtime/session_state.cc                    | 2
-rw-r--r--  tensorflow/core/common_runtime/simple_placer.cc                    | 2
-rw-r--r--  tensorflow/core/distributed_runtime/scheduler.cc                   | 2
-rw-r--r--  tensorflow/core/graph/costmodel.cc                                 | 6
-rw-r--r--  tensorflow/core/kernels/fifo_queue.cc                              | 2
-rw-r--r--  tensorflow/core/kernels/padding_fifo_queue.cc                      | 2
-rw-r--r--  tensorflow/core/kernels/random_shuffle_queue_op.cc                 | 2
-rw-r--r--  tensorflow/core/kernels/range_sampler.cc                           | 6
-rw-r--r--  tensorflow/core/kernels/sample_distorted_bounding_box_op.cc        | 2
-rw-r--r--  tensorflow/core/kernels/shuffle_dataset_op.cc                      | 2
-rw-r--r--  tensorflow/core/kernels/sparse_matmul_op.cc                        | 4
-rw-r--r--  tensorflow/core/kernels/spectrogram_test_utils.cc                  | 2
-rw-r--r--  tensorflow/core/kernels/stack_ops.cc                               | 2
-rw-r--r--  tensorflow/core/kernels/string_split_op.cc                         | 2
-rw-r--r--  tensorflow/core/kernels/summary_op.cc                              | 2
-rw-r--r--  tensorflow/core/kernels/tensor_array_ops.cc                        | 2
-rw-r--r--  tensorflow/core/lib/io/record_reader.cc                            | 4
-rw-r--r--  tensorflow/core/lib/io/snappy/snappy_inputbuffer.cc                | 2
-rw-r--r--  tensorflow/core/lib/io/table_test.cc                               | 2
-rw-r--r--  tensorflow/core/lib/io/zlib_inputstream.cc                         | 2
-rw-r--r--  tensorflow/core/platform/file_system_test.cc                       | 4
-rw-r--r--  tensorflow/core/platform/posix/load_library.cc                     | 2
-rw-r--r--  tensorflow/core/util/example_proto_fast_parsing.cc                 | 4
-rw-r--r--  tensorflow/core/util/example_proto_helper.cc                       | 2
-rw-r--r--  tensorflow/core/util/memmapped_file_system_writer.cc               | 2
-rw-r--r--  tensorflow/stream_executor/kernel_spec.cc                          | 4
-rw-r--r--  tensorflow/tools/graph_transforms/freeze_requantization_ranges.cc  | 2
-rw-r--r--  tensorflow/tools/graph_transforms/quantize_nodes.cc                | 2
-rw-r--r--  tensorflow/tools/graph_transforms/set_device.cc                    | 2
-rw-r--r--  tensorflow/tools/graph_transforms/strip_unused_nodes.cc            | 2
-rw-r--r--  tensorflow/tools/graph_transforms/summarize_graph_main.cc          | 2
-rw-r--r--  tensorflow/tools/graph_transforms/transform_graph.cc               | 2
-rw-r--r--  tensorflow/tools/graph_transforms/transform_utils.cc               | 2
37 files changed, 51 insertions(+), 51 deletions(-)
diff --git a/tensorflow/compiler/jit/graphcycles/graphcycles_test.cc b/tensorflow/compiler/jit/graphcycles/graphcycles_test.cc
index 5863d50c86..e47b782207 100644
--- a/tensorflow/compiler/jit/graphcycles/graphcycles_test.cc
+++ b/tensorflow/compiler/jit/graphcycles/graphcycles_test.cc
@@ -243,7 +243,7 @@ TEST(GraphCycles, RandomizedTest) {
break;
case 1: // Remove a node
- if (nodes.size() > 0) {
+ if (!nodes.empty()) {
int node_index = RandomNode(&rnd, &nodes);
int node = nodes[node_index];
nodes[node_index] = nodes.back();
@@ -263,7 +263,7 @@ TEST(GraphCycles, RandomizedTest) {
break;
case 2: // Add an edge
- if (nodes.size() > 0) {
+ if (!nodes.empty()) {
int from = RandomNode(&rnd, &nodes);
int to = RandomNode(&rnd, &nodes);
if (EdgeIndex(&edges, nodes[from], nodes[to]) == -1) {
@@ -282,7 +282,7 @@ TEST(GraphCycles, RandomizedTest) {
break;
case 3: // Remove an edge
- if (edges.size() > 0) {
+ if (!edges.empty()) {
int i = RandomEdge(&rnd, &edges);
int from = edges[i].from;
int to = edges[i].to;
@@ -296,7 +296,7 @@ TEST(GraphCycles, RandomizedTest) {
break;
case 4: // Check a path
- if (nodes.size() > 0) {
+ if (!nodes.empty()) {
int from = RandomNode(&rnd, &nodes);
int to = RandomNode(&rnd, &nodes);
int32 path[2 * kMaxNodes];
diff --git a/tensorflow/compiler/xla/legacy_flags/layout_util_flags.cc b/tensorflow/compiler/xla/legacy_flags/layout_util_flags.cc
index 4242b501d4..f838861898 100644
--- a/tensorflow/compiler/xla/legacy_flags/layout_util_flags.cc
+++ b/tensorflow/compiler/xla/legacy_flags/layout_util_flags.cc
@@ -53,7 +53,7 @@ static void AllocateRawFlag() {
static bool ParseDefaultLayout(const string& text, DefaultLayout* layout) {
bool result = true;
std::vector<string> field = tensorflow::str_util::Split(text, ':');
- if (field.size() > 0) {
+ if (!field.empty()) {
if (field[0] == "random") {
layout->dimension_order = DefaultLayout::DimensionOrder::kRandom;
if (field.size() > 1) {
diff --git a/tensorflow/compiler/xla/metric_table_report.cc b/tensorflow/compiler/xla/metric_table_report.cc
index 0d4ddc2392..f8985b2c4f 100644
--- a/tensorflow/compiler/xla/metric_table_report.cc
+++ b/tensorflow/compiler/xla/metric_table_report.cc
@@ -150,7 +150,7 @@ void MetricTableReport::AppendCategoryTable() {
// Show the category.
string text = category.category_text;
- if (text == "") {
+ if (text.empty()) {
text = "[no category]";
}
tensorflow::strings::StrAppend(&text, " (", category.entries.size(), " ",
@@ -200,7 +200,7 @@ void MetricTableReport::AppendEntryTable() {
metric_sum += entry.metric;
string text = entry.text;
- if (text == "") {
+ if (text.empty()) {
text = "[no entry text]";
}
AppendTableRow(text, entry.metric, metric_sum);
diff --git a/tensorflow/compiler/xla/service/shape_inference.cc b/tensorflow/compiler/xla/service/shape_inference.cc
index 7793408328..0840fdf930 100644
--- a/tensorflow/compiler/xla/service/shape_inference.cc
+++ b/tensorflow/compiler/xla/service/shape_inference.cc
@@ -227,7 +227,7 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
/* static */ StatusOr<Shape> ShapeInference::InferConcatOpShape(
tensorflow::gtl::ArraySlice<const Shape*> arg_shapes,
const int64 dimension) {
- if (arg_shapes.size() == 0) {
+ if (arg_shapes.empty()) {
return InvalidArgument("Concatenate expects at least one argument");
}
if (dimension < 0 || dimension >= ShapeUtil::Rank(*arg_shapes[0])) {
@@ -679,7 +679,7 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(
/* static */ StatusOr<Shape> ShapeInference::InferMapShape(
tensorflow::gtl::ArraySlice<const Shape*> arg_shapes,
const ProgramShape& to_apply) {
- if (arg_shapes.size() == 0) {
+ if (arg_shapes.empty()) {
return InvalidArgument("Map expects at least one argument");
}
diff --git a/tensorflow/core/common_runtime/session_state.cc b/tensorflow/core/common_runtime/session_state.cc
index 7e7200070d..6befa53dff 100644
--- a/tensorflow/core/common_runtime/session_state.cc
+++ b/tensorflow/core/common_runtime/session_state.cc
@@ -66,7 +66,7 @@ Status TensorStore::AddTensor(const string& name, const TensorAndKey& tk) {
Status TensorStore::SaveTensors(const std::vector<string>& output_names,
SessionState* session_state) {
mutex_lock l(lock_);
- if (tensors_.size() != 0) {
+ if (!tensors_.empty()) {
// Save only the tensors in output_names in the session.
for (const string& name : output_names) {
TensorId id(ParseTensorName(name));
diff --git a/tensorflow/core/common_runtime/simple_placer.cc b/tensorflow/core/common_runtime/simple_placer.cc
index 59bf0544c1..13a5133887 100644
--- a/tensorflow/core/common_runtime/simple_placer.cc
+++ b/tensorflow/core/common_runtime/simple_placer.cc
@@ -244,7 +244,7 @@ class ColocationGraph {
// members_[old_root].supported_device_types.
MergeSupportedDevices(&members_[new_root].supported_device_types,
members_[old_root].supported_device_types);
- if (members_[new_root].supported_device_types.size() == 0) {
+ if (members_[new_root].supported_device_types.empty()) {
string debug_info;
AddDebugInfo(x_root, &debug_info);
AddDebugInfo(y_root, &debug_info);
diff --git a/tensorflow/core/distributed_runtime/scheduler.cc b/tensorflow/core/distributed_runtime/scheduler.cc
index 6b18db5332..0b628205c3 100644
--- a/tensorflow/core/distributed_runtime/scheduler.cc
+++ b/tensorflow/core/distributed_runtime/scheduler.cc
@@ -264,7 +264,7 @@ Microseconds GreedyScheduler::ComputeSchedule(
for (auto& x : device_states_) {
Sim* sim = x.second;
while (sim->num_running < sim->degree_parallelism &&
- sim->ready_nodes.size() > 0) {
+ !sim->ready_nodes.empty()) {
Event e;
e.node = GetNodeWithHighestPriority(sim->ready_nodes);
e.time = event.time + cost_model_->TimeEstimate(e.node);
diff --git a/tensorflow/core/graph/costmodel.cc b/tensorflow/core/graph/costmodel.cc
index db6e1b8d6f..69247a4f62 100644
--- a/tensorflow/core/graph/costmodel.cc
+++ b/tensorflow/core/graph/costmodel.cc
@@ -60,7 +60,7 @@ void CostModel::MergeFromLocal(const Graph& g, const CostModel& cm) {
time_[global_id] += cm.time_[local_id];
int num_slots = cm.slot_bytes_[local_id].size();
if (num_slots > 0) {
- if (slot_bytes_[global_id].size() == 0) {
+ if (slot_bytes_[global_id].empty()) {
slot_bytes_[global_id].resize(num_slots);
} else {
CHECK_EQ(num_slots, slot_bytes_[global_id].size());
@@ -82,7 +82,7 @@ void CostModel::MergeFromGlobal(const CostModel& cm) {
time_[i] += cm.time_[i];
int num_slots = cm.slot_bytes_[i].size();
if (num_slots > 0) {
- if (slot_bytes_[i].size() == 0) {
+ if (slot_bytes_[i].empty()) {
slot_bytes_[i].resize(num_slots);
} else {
CHECK_EQ(num_slots, slot_bytes_[i].size());
@@ -138,7 +138,7 @@ void CostModel::SetNumOutputs(const Node* node, int num_outputs) {
auto perslot = &slot_bytes_[id];
auto max_mem_usage = &max_mem_usage_[id];
auto output_port_alloc_ids = &output_port_alloc_ids_[id];
- if (perslot->size() > 0) {
+ if (!perslot->empty()) {
CHECK_EQ(num_outputs, perslot->size()) << "Cannot resize slot_bytes, node="
<< node->name();
} else {
diff --git a/tensorflow/core/kernels/fifo_queue.cc b/tensorflow/core/kernels/fifo_queue.cc
index c426efc7f9..030cf8a49d 100644
--- a/tensorflow/core/kernels/fifo_queue.cc
+++ b/tensorflow/core/kernels/fifo_queue.cc
@@ -281,7 +281,7 @@ void FIFOQueue::TryDequeueMany(int num_elements, OpKernelContext* ctx,
}
}
}
- if (allow_small_batch && queues_[0].size() > 0) {
+ if (allow_small_batch && !queues_[0].empty()) {
// Request all remaining elements in the queue.
queue_size = queues_[0].size();
attempt->tuple.clear();
diff --git a/tensorflow/core/kernels/padding_fifo_queue.cc b/tensorflow/core/kernels/padding_fifo_queue.cc
index c664d7eaf5..f4626d4a5d 100644
--- a/tensorflow/core/kernels/padding_fifo_queue.cc
+++ b/tensorflow/core/kernels/padding_fifo_queue.cc
@@ -119,7 +119,7 @@ void PaddingFIFOQueue::TryDequeueMany(int num_elements, OpKernelContext* ctx,
}
}
}
- if (allow_small_batch && queues_[0].size() > 0) {
+ if (allow_small_batch && !queues_[0].empty()) {
// Request all remaining elements in the queue.
queue_size = queues_[0].size();
attempt->tuples.clear();
diff --git a/tensorflow/core/kernels/random_shuffle_queue_op.cc b/tensorflow/core/kernels/random_shuffle_queue_op.cc
index d3987eeb32..d9efb5fe7d 100644
--- a/tensorflow/core/kernels/random_shuffle_queue_op.cc
+++ b/tensorflow/core/kernels/random_shuffle_queue_op.cc
@@ -358,7 +358,7 @@ void RandomShuffleQueue::TryDequeueMany(int num_elements, OpKernelContext* ctx,
}
}
}
- if (allow_small_batch && queues_[0].size() > 0) {
+ if (allow_small_batch && !queues_[0].empty()) {
// Request all remaining elements in the queue.
queue_size = queues_[0].size();
attempt->tuple.clear();
diff --git a/tensorflow/core/kernels/range_sampler.cc b/tensorflow/core/kernels/range_sampler.cc
index 7e57331ab4..d682cd3b52 100644
--- a/tensorflow/core/kernels/range_sampler.cc
+++ b/tensorflow/core/kernels/range_sampler.cc
@@ -105,7 +105,7 @@ void RangeSampler::SampleBatchGetExpectedCountAvoid(
num_tries = batch_size;
}
// Compute the expected counts of the batch and the extra values
- if (batch_expected_count.size() > 0) {
+ if (!batch_expected_count.empty()) {
CHECK_EQ(batch_size, batch_expected_count.size());
for (int i = 0; i < batch_size; i++) {
batch_expected_count[i] =
@@ -131,7 +131,7 @@ void AllSampler::SampleBatchGetExpectedCountAvoid(
for (int i = 0; i < batch_size; i++) {
batch[i] = i;
}
- if (batch_expected_count.size() > 0) {
+ if (!batch_expected_count.empty()) {
CHECK_EQ(batch_size, batch_expected_count.size());
for (int i = 0; i < batch_size; i++) {
batch_expected_count[i] = 1;
@@ -290,7 +290,7 @@ Status FixedUnigramSampler::LoadFromFile(Env* env, const string& vocab_file,
// The vocabulary file should be in csv like format, with the last
// field the weight associated with the word.
std::vector<string> cols = str_util::Split(line, ',');
- if (cols.size() == 0) continue;
+ if (cols.empty()) continue;
// Skip entries that do not belong to this shard.
if (word_id % num_shards_ == shard_) {
float w = 0.0;
diff --git a/tensorflow/core/kernels/sample_distorted_bounding_box_op.cc b/tensorflow/core/kernels/sample_distorted_bounding_box_op.cc
index 96fff23f47..4dae5da635 100644
--- a/tensorflow/core/kernels/sample_distorted_bounding_box_op.cc
+++ b/tensorflow/core/kernels/sample_distorted_bounding_box_op.cc
@@ -298,7 +298,7 @@ class SampleDistortedBoundingBoxOp : public OpKernel {
// Insert the entire image if no bounding boxes are supplied.
const Rectangle image_rect(0, 0, width, height);
- if (bounding_boxes.size() < 1) {
+ if (bounding_boxes.empty()) {
OP_REQUIRES(context, use_image_if_no_bounding_boxes_,
errors::InvalidArgument(
"No bounding boxes provided as input. One must "
diff --git a/tensorflow/core/kernels/shuffle_dataset_op.cc b/tensorflow/core/kernels/shuffle_dataset_op.cc
index c7f5cee3af..7156e5155f 100644
--- a/tensorflow/core/kernels/shuffle_dataset_op.cc
+++ b/tensorflow/core/kernels/shuffle_dataset_op.cc
@@ -127,7 +127,7 @@ class ShuffleDatasetOp : public OpKernel {
}
}
- if (buffer_.size() > 0) {
+ if (!buffer_.empty()) {
*end_of_sequence = false;
// Choose an element to produce uniformly at random, and
// swap the last element into its place in the buffer.
diff --git a/tensorflow/core/kernels/sparse_matmul_op.cc b/tensorflow/core/kernels/sparse_matmul_op.cc
index 22c8bc48b4..d109543494 100644
--- a/tensorflow/core/kernels/sparse_matmul_op.cc
+++ b/tensorflow/core/kernels/sparse_matmul_op.cc
@@ -607,8 +607,8 @@ inline void GEPP(
}
for (const auto* left_slice : left_slices) {
const auto& left = *left_slice;
- const auto* data3 = (left.data3.size() > 0) ? &left.data3[0] : nullptr;
- const auto* data = (left.data.size() > 0) ? &left.data[0] : nullptr;
+ const auto* data3 = (!left.data3.empty()) ? &left.data3[0] : nullptr;
+ const auto* data = (!left.data.empty()) ? &left.data[0] : nullptr;
const int num_blocks = left.index3_offset.size();
int begin3 = 0;
int begin = 0;
diff --git a/tensorflow/core/kernels/spectrogram_test_utils.cc b/tensorflow/core/kernels/spectrogram_test_utils.cc
index a2141c649f..78d49e474a 100644
--- a/tensorflow/core/kernels/spectrogram_test_utils.cc
+++ b/tensorflow/core/kernels/spectrogram_test_utils.cc
@@ -100,7 +100,7 @@ void ReadCSVFileToComplexVectorOrDie(
}
std::vector<string> lines = str_util::Split(data_string, '\n');
for (const string& line : lines) {
- if (line == "") {
+ if (line.empty()) {
continue;
}
std::vector<std::complex<double> > data_line;
diff --git a/tensorflow/core/kernels/stack_ops.cc b/tensorflow/core/kernels/stack_ops.cc
index 241d39ba44..b4698a8053 100644
--- a/tensorflow/core/kernels/stack_ops.cc
+++ b/tensorflow/core/kernels/stack_ops.cc
@@ -147,7 +147,7 @@ class StackOp : public OpKernel {
explicit StackOp(OpKernelConstruction* context) : OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("elem_type", &elem_type_));
OP_REQUIRES_OK(context, context->GetAttr("stack_name", &stack_name_));
- if (stack_name_ == "") stack_name_ = name();
+ if (stack_name_.empty()) stack_name_ = name();
}
void Compute(OpKernelContext* ctx) override {
diff --git a/tensorflow/core/kernels/string_split_op.cc b/tensorflow/core/kernels/string_split_op.cc
index 3226e5e0f8..d7b804daeb 100644
--- a/tensorflow/core/kernels/string_split_op.cc
+++ b/tensorflow/core/kernels/string_split_op.cc
@@ -29,7 +29,7 @@ namespace tensorflow {
namespace {
std::vector<string> Split(const string& str, const string& delimiter) {
- if (delimiter.size()) {
+ if (!delimiter.empty()) {
return str_util::Split(str, delimiter, str_util::SkipEmpty());
}
std::vector<string> char_vector(str.size());
diff --git a/tensorflow/core/kernels/summary_op.cc b/tensorflow/core/kernels/summary_op.cc
index d8596ba9ea..b818724ec2 100644
--- a/tensorflow/core/kernels/summary_op.cc
+++ b/tensorflow/core/kernels/summary_op.cc
@@ -149,7 +149,7 @@ class SummaryMergeOp : public OpKernel {
const string& tag = summary_in.value(v).tag();
// The tag is unused by the TensorSummary op, so no need to check
// for duplicates.
- if ((tag != "") && !tags.insert(tag).second) {
+ if ((!tag.empty()) && !tags.insert(tag).second) {
c->SetStatus(errors::InvalidArgument(strings::StrCat(
"Duplicate tag ", tag, " found in summary inputs")));
return;
diff --git a/tensorflow/core/kernels/tensor_array_ops.cc b/tensorflow/core/kernels/tensor_array_ops.cc
index bd7556658a..b46b405ffb 100644
--- a/tensorflow/core/kernels/tensor_array_ops.cc
+++ b/tensorflow/core/kernels/tensor_array_ops.cc
@@ -156,7 +156,7 @@ class TensorArrayOp : public TensorArrayCreationOp {
context->GetAttr("clear_after_read", &clear_after_read_));
OP_REQUIRES_OK(context,
context->GetAttr("tensor_array_name", &tensor_array_name_));
- if (tensor_array_name_ == "") tensor_array_name_ = name();
+ if (tensor_array_name_.empty()) tensor_array_name_ = name();
}
Status CreateTensorArray(OpKernelContext* ctx, ResourceMgr* rm,
diff --git a/tensorflow/core/lib/io/record_reader.cc b/tensorflow/core/lib/io/record_reader.cc
index 450f10d299..ff2fd48de9 100644
--- a/tensorflow/core/lib/io/record_reader.cc
+++ b/tensorflow/core/lib/io/record_reader.cc
@@ -102,7 +102,7 @@ Status RecordReader::ReadChecksummed(uint64 offset, size_t n,
TF_RETURN_IF_ERROR(zlib_input_stream_->ReadNBytes(expected, storage));
if (storage->size() != expected) {
- if (storage->size() == 0) {
+ if (storage->empty()) {
return errors::OutOfRange("eof");
} else {
return errors::DataLoss("truncated record at ", offset);
@@ -121,7 +121,7 @@ Status RecordReader::ReadChecksummed(uint64 offset, size_t n,
StringPiece data;
TF_RETURN_IF_ERROR(src_->Read(offset, expected, &data, &(*storage)[0]));
if (data.size() != expected) {
- if (data.size() == 0) {
+ if (data.empty()) {
return errors::OutOfRange("eof");
} else {
return errors::DataLoss("truncated record at ", offset);
diff --git a/tensorflow/core/lib/io/snappy/snappy_inputbuffer.cc b/tensorflow/core/lib/io/snappy/snappy_inputbuffer.cc
index 39202e9237..3088d4d4b9 100644
--- a/tensorflow/core/lib/io/snappy/snappy_inputbuffer.cc
+++ b/tensorflow/core/lib/io/snappy/snappy_inputbuffer.cc
@@ -183,7 +183,7 @@ Status SnappyInputBuffer::ReadFromFile() {
// possible that on the last read there isn't enough data in the file to
// fill up the buffer in which case file_->ReadNBytes would return an
// OutOfRange error.
- if (data.size() == 0) {
+ if (data.empty()) {
return errors::OutOfRange("EOF reached");
}
if (errors::IsOutOfRange(s)) {
diff --git a/tensorflow/core/lib/io/table_test.cc b/tensorflow/core/lib/io/table_test.cc
index d479c2d533..aed3ef9c07 100644
--- a/tensorflow/core/lib/io/table_test.cc
+++ b/tensorflow/core/lib/io/table_test.cc
@@ -396,7 +396,7 @@ class Harness : public ::testing::Test {
break;
case 1: {
// Attempt to return something smaller than an existing key
- if (result.size() > 0 && result[result.size() - 1] > '\0') {
+ if (!result.empty() && result[result.size() - 1] > '\0') {
result[result.size() - 1]--;
}
break;
diff --git a/tensorflow/core/lib/io/zlib_inputstream.cc b/tensorflow/core/lib/io/zlib_inputstream.cc
index d019b65510..85a1fc032f 100644
--- a/tensorflow/core/lib/io/zlib_inputstream.cc
+++ b/tensorflow/core/lib/io/zlib_inputstream.cc
@@ -110,7 +110,7 @@ Status ZlibInputStream::ReadFromStream() {
// possible that on the last read there isn't enough data in the stream to
// fill up the buffer in which case input_stream_->ReadNBytes would return an
// OutOfRange error.
- if (data.size() == 0) {
+ if (data.empty()) {
return errors::OutOfRange("EOF reached");
}
if (errors::IsOutOfRange(s)) {
diff --git a/tensorflow/core/platform/file_system_test.cc b/tensorflow/core/platform/file_system_test.cc
index 47d6ce73bb..abe88ab6c7 100644
--- a/tensorflow/core/platform/file_system_test.cc
+++ b/tensorflow/core/platform/file_system_test.cc
@@ -264,7 +264,7 @@ class TestFileSystem : public NullFileSystem {
public:
// Only allow for a single root directory.
Status IsDirectory(const string& dirname) override {
- if (dirname == "." || dirname == "") {
+ if (dirname == "." || dirname.empty()) {
return Status::OK();
}
return Status(tensorflow::error::FAILED_PRECONDITION, "Not a dir");
@@ -272,7 +272,7 @@ class TestFileSystem : public NullFileSystem {
// Simulating a FS with a root dir and a single file underneath it.
Status GetChildren(const string& dir, std::vector<string>* result) override {
- if (dir == "." || dir == "") {
+ if (dir == "." || dir.empty()) {
result->push_back("test");
}
return Status::OK();
diff --git a/tensorflow/core/platform/posix/load_library.cc b/tensorflow/core/platform/posix/load_library.cc
index d795b6058f..8fad53560c 100644
--- a/tensorflow/core/platform/posix/load_library.cc
+++ b/tensorflow/core/platform/posix/load_library.cc
@@ -49,7 +49,7 @@ string FormatLibraryFileName(const string& name, const string& version) {
filename = "lib" + name + "." + version + ".dylib";
}
#else
- if (version.size() == 0) {
+ if (version.empty()) {
filename = "lib" + name + ".so";
} else {
filename = "lib" + name + ".so" + "." + version;
diff --git a/tensorflow/core/util/example_proto_fast_parsing.cc b/tensorflow/core/util/example_proto_fast_parsing.cc
index 096c60539e..c2cbc66722 100644
--- a/tensorflow/core/util/example_proto_fast_parsing.cc
+++ b/tensorflow/core/util/example_proto_fast_parsing.cc
@@ -935,8 +935,8 @@ Status FastParseExample(const Config& config,
for (size_t e = start; e < end; ++e) {
status_of_minibatch[minibatch] = FastParseSerializedExample(
serialized[e],
- (example_names.size() > 0 ? example_names[e] : "<unknown>"), e,
- config, config_index, hasher, &fixed_dense_values,
+ (!example_names.empty() ? example_names[e] : "<unknown>"), e, config,
+ config_index, hasher, &fixed_dense_values,
&varlen_dense_buffers[minibatch], &sparse_buffers[minibatch]);
if (!status_of_minibatch[minibatch].ok()) break;
}
diff --git a/tensorflow/core/util/example_proto_helper.cc b/tensorflow/core/util/example_proto_helper.cc
index 8e406fde5e..5ba6cb77b4 100644
--- a/tensorflow/core/util/example_proto_helper.cc
+++ b/tensorflow/core/util/example_proto_helper.cc
@@ -323,7 +323,7 @@ Status BatchExampleProtoToTensors(
std::vector<Tensor>* output_sparse_shapes_tensor) {
const int batch_size = examples.size();
- const bool has_names = (names.size() > 0);
+ const bool has_names = (!names.empty());
if (has_names) {
if (names.size() != examples.size()) {
return errors::InvalidArgument(
diff --git a/tensorflow/core/util/memmapped_file_system_writer.cc b/tensorflow/core/util/memmapped_file_system_writer.cc
index 7e87f4539c..9556ee385f 100644
--- a/tensorflow/core/util/memmapped_file_system_writer.cc
+++ b/tensorflow/core/util/memmapped_file_system_writer.cc
@@ -41,7 +41,7 @@ Status MemmappedFileSystemWriter::SaveTensor(const Tensor& tensor,
" and include [A-Za-z0-9_.]");
}
const auto tensor_data = tensor.tensor_data();
- if (0 == tensor_data.size()) {
+ if (tensor_data.empty()) {
return errors::InvalidArgument(
"MemmappedEnvWritter: saving tensor with 0 size");
}
diff --git a/tensorflow/stream_executor/kernel_spec.cc b/tensorflow/stream_executor/kernel_spec.cc
index b4f2aa4649..0404c573f0 100644
--- a/tensorflow/stream_executor/kernel_spec.cc
+++ b/tensorflow/stream_executor/kernel_spec.cc
@@ -103,7 +103,7 @@ const char *CudaPtxInMemory::default_text() const {
if (decompressed_ptx_iter != decompressed_ptx_.end()) {
// If the decompressed string is empty, which means the ptx hasn't been
// decompressed, decompress it here.
- if (decompressed_ptx_iter->second.size() == 0) {
+ if (decompressed_ptx_iter->second.empty()) {
decompressed_ptx_iter->second = DecompressPtx(ptx);
}
return decompressed_ptx_iter->second.c_str();
@@ -136,7 +136,7 @@ const char *CudaPtxInMemory::text(int compute_capability_major,
if (decompressed_ptx_iter != decompressed_ptx_.end()) {
// If the decompressed string is empty, which means the ptx hasn't been
// decompressed, decompress it here.
- if (decompressed_ptx_iter->second.size() == 0) {
+ if (decompressed_ptx_iter->second.empty()) {
decompressed_ptx_iter->second = DecompressPtx(ptx_iter->second);
}
return decompressed_ptx_iter->second.c_str();
diff --git a/tensorflow/tools/graph_transforms/freeze_requantization_ranges.cc b/tensorflow/tools/graph_transforms/freeze_requantization_ranges.cc
index c20521e41b..6ff519ad52 100644
--- a/tensorflow/tools/graph_transforms/freeze_requantization_ranges.cc
+++ b/tensorflow/tools/graph_transforms/freeze_requantization_ranges.cc
@@ -108,7 +108,7 @@ Status FreezeRequantizationRanges(const GraphDef& input_graph_def,
string min_max_log_file;
TF_RETURN_IF_ERROR(
context.GetOneStringParameter("min_max_log_file", "", &min_max_log_file));
- if (min_max_log_file == "") {
+ if (min_max_log_file.empty()) {
return errors::InvalidArgument(
"You must pass a file name to min_max_log_file");
}
diff --git a/tensorflow/tools/graph_transforms/quantize_nodes.cc b/tensorflow/tools/graph_transforms/quantize_nodes.cc
index 5497ad008b..14141ca788 100644
--- a/tensorflow/tools/graph_transforms/quantize_nodes.cc
+++ b/tensorflow/tools/graph_transforms/quantize_nodes.cc
@@ -149,7 +149,7 @@ string UniqueNodeNameFromInput(const string& input_name) {
result += "__hat__";
}
result += node_name;
- if (suffix != "") {
+ if (!suffix.empty()) {
result += "__port__" + suffix.substr(1, suffix.size() - 1);
}
return result;
diff --git a/tensorflow/tools/graph_transforms/set_device.cc b/tensorflow/tools/graph_transforms/set_device.cc
index 4e4529f4b6..e1712f63c8 100644
--- a/tensorflow/tools/graph_transforms/set_device.cc
+++ b/tensorflow/tools/graph_transforms/set_device.cc
@@ -32,7 +32,7 @@ Status SetDevice(const GraphDef& input_graph_def,
for (const NodeDef& node : input_graph_def.node()) {
NodeDef* new_node = output_graph_def->mutable_node()->Add();
new_node->CopyFrom(node);
- if (!if_default || (node.device() == "")) {
+ if (!if_default || (node.device().empty())) {
new_node->set_device(new_device);
}
}
diff --git a/tensorflow/tools/graph_transforms/strip_unused_nodes.cc b/tensorflow/tools/graph_transforms/strip_unused_nodes.cc
index 786bf4f6da..180fd373a2 100644
--- a/tensorflow/tools/graph_transforms/strip_unused_nodes.cc
+++ b/tensorflow/tools/graph_transforms/strip_unused_nodes.cc
@@ -76,7 +76,7 @@ Status TypeForPlaceholder(const TransformFuncContext& context,
// Takes a comma-separated string of numbers and parses them into a shape.
bool TensorShapeFromString(const string& shape_string, TensorShape* result) {
- if (shape_string == "") {
+ if (shape_string.empty()) {
return false;
}
std::vector<int64> dims;
diff --git a/tensorflow/tools/graph_transforms/summarize_graph_main.cc b/tensorflow/tools/graph_transforms/summarize_graph_main.cc
index 6db500f1e0..f8ff5ece36 100644
--- a/tensorflow/tools/graph_transforms/summarize_graph_main.cc
+++ b/tensorflow/tools/graph_transforms/summarize_graph_main.cc
@@ -184,7 +184,7 @@ Status SummarizeGraph(const GraphDef& graph, const string& graph_path,
++control_edge_count;
}
}
- if (node.device() != "") {
+ if (!node.device().empty()) {
++device_counts[node.device()];
}
if ((node.op() == "Const") || (node.op() == "Variable") ||
diff --git a/tensorflow/tools/graph_transforms/transform_graph.cc b/tensorflow/tools/graph_transforms/transform_graph.cc
index b8bf2dc090..a5d542910a 100644
--- a/tensorflow/tools/graph_transforms/transform_graph.cc
+++ b/tensorflow/tools/graph_transforms/transform_graph.cc
@@ -252,7 +252,7 @@ Status TransformGraph(const std::vector<string>& inputs,
TransformRegistry* transform_registry = GetTransformRegistry();
for (const auto& transform_info : transform_params) {
const string& transform_name = transform_info.first;
- if (transform_name == "") {
+ if (transform_name.empty()) {
continue;
}
if (!transform_registry->count(transform_name)) {
diff --git a/tensorflow/tools/graph_transforms/transform_utils.cc b/tensorflow/tools/graph_transforms/transform_utils.cc
index 2c639202b5..7d527469f2 100644
--- a/tensorflow/tools/graph_transforms/transform_utils.cc
+++ b/tensorflow/tools/graph_transforms/transform_utils.cc
@@ -110,7 +110,7 @@ string CanonicalInputName(const string& input_name) {
string node_name;
string suffix;
NodeNamePartsFromInput(input_name, &prefix, &node_name, &suffix);
- if (suffix == "") {
+ if (suffix.empty()) {
suffix = ":0";
}
return prefix + node_name + suffix;