-rw-r--r--  tensorflow/core/common_runtime/gpu/gpu_event_mgr.cc               4
-rw-r--r--  tensorflow/core/common_runtime/simple_graph_execution_state.cc   16
-rw-r--r--  tensorflow/core/kernels/resize_bilinear_op_test.cc                2
-rw-r--r--  tensorflow/core/kernels/xsmm_conv2d.cc                            2
-rw-r--r--  tensorflow/core/lib/io/zlib_inputstream.cc                        2
-rw-r--r--  tensorflow/core/lib/io/zlib_outputbuffer.cc                       2
-rw-r--r--  tensorflow/core/lib/jpeg/jpeg_mem_unittest.cc                     8
-rw-r--r--  tensorflow/core/ops/training_ops_test.cc                          8
-rw-r--r--  tensorflow/core/util/events_writer.cc                            10
-rw-r--r--  tensorflow/stream_executor/cuda/cuda_diagnostics.cc               2
-rw-r--r--  tensorflow/tools/tfprof/internal/tfprof_op.cc                    19
-rw-r--r--  tensorflow/tools/tfprof/tfprof_main.cc                            4
12 files changed, 41 insertions, 38 deletions
diff --git a/tensorflow/core/common_runtime/gpu/gpu_event_mgr.cc b/tensorflow/core/common_runtime/gpu/gpu_event_mgr.cc
index 9618717fc5..2452efc779 100644
--- a/tensorflow/core/common_runtime/gpu/gpu_event_mgr.cc
+++ b/tensorflow/core/common_runtime/gpu/gpu_event_mgr.cc
@@ -77,14 +77,14 @@ EventMgr::~EventMgr() {
}
void EventMgr::StartPollingLoop() {
- CHECK(polling_stopped_.get() == nullptr);
+ CHECK(polling_stopped_ == nullptr);
stop_polling_.reset(new Notification);
polling_stopped_.reset(new Notification);
threadpool_.Schedule([this]() { PollLoop(); });
}
void EventMgr::StopPollingLoop() {
- if (stop_polling_.get()) {
+ if (stop_polling_) {
stop_polling_->Notify();
polling_stopped_->WaitForNotification();
stop_polling_.reset(nullptr);
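The hunks above rely on std::unique_ptr comparing directly against nullptr and converting to bool in a boolean context, which makes the .get() calls redundant. A minimal standalone sketch of the equivalence:

    #include <cassert>
    #include <memory>

    int main() {
      std::unique_ptr<int> p;   // empty: manages nullptr
      assert(p == nullptr);     // operator==(const unique_ptr&, nullptr_t)
      assert(!p);               // explicit operator bool, usable in conditions
      p.reset(new int(42));
      if (p) {                  // same test as p.get() != nullptr
        assert(*p == 42);
      }
      return 0;
    }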
diff --git a/tensorflow/core/common_runtime/simple_graph_execution_state.cc b/tensorflow/core/common_runtime/simple_graph_execution_state.cc
index 3806f9f47f..1a977c1460 100644
--- a/tensorflow/core/common_runtime/simple_graph_execution_state.cc
+++ b/tensorflow/core/common_runtime/simple_graph_execution_state.cc
@@ -74,8 +74,8 @@ SimpleGraphExecutionState::~SimpleGraphExecutionState() {
std::unique_ptr<SimpleGraphExecutionState> ret(
new SimpleGraphExecutionState(graph_def, options));
- TF_RETURN_IF_ERROR(AddDefaultAttrsToGraphDef(&ret->original_graph_def_,
- *ret->flib_def_.get(), 0));
+ TF_RETURN_IF_ERROR(
+ AddDefaultAttrsToGraphDef(&ret->original_graph_def_, *ret->flib_def_, 0));
// TODO(mrry): Refactor InitBaseGraph() so that we don't have to
// pass an empty BuildGraphOptions (that isn't going to be used when
// place_pruned_graph is false).
@@ -103,8 +103,8 @@ SimpleGraphExecutionState::~SimpleGraphExecutionState() {
GraphDef temp(graph_def);
std::unique_ptr<SimpleGraphExecutionState> ret(
new SimpleGraphExecutionState(&temp, options));
- TF_RETURN_IF_ERROR(AddDefaultAttrsToGraphDef(&ret->original_graph_def_,
- *ret->flib_def_.get(), 0));
+ TF_RETURN_IF_ERROR(
+ AddDefaultAttrsToGraphDef(&ret->original_graph_def_, *ret->flib_def_, 0));
TF_RETURN_IF_ERROR(ret->InitBaseGraph(subgraph_options));
TF_RETURN_IF_ERROR(ret->BuildGraph(subgraph_options, out_client_graph));
*out_state = std::move(ret);
@@ -139,7 +139,7 @@ Status SimpleGraphExecutionState::Extend(
int old_node_size = gdef.node_size();
gdef.mutable_node()->MergeFrom(extension_def.node());
TF_RETURN_IF_ERROR(
- AddDefaultAttrsToGraphDef(&gdef, *flib_def_.get(), old_node_size));
+ AddDefaultAttrsToGraphDef(&gdef, *flib_def_, old_node_size));
// Merge versions
if (gdef.has_versions()) {
if (gdef.versions().producer() != extension_def.versions().producer()) {
@@ -181,7 +181,7 @@ Status SimpleGraphExecutionState::Extend(
if (gdef.versions().producer() >= 5) {
// Validate the graph: we assume that merging two valid graphs
// should maintain graph validity.
- TF_RETURN_IF_ERROR(graph::ValidateGraphDef(gdef, *flib_def_.get()));
+ TF_RETURN_IF_ERROR(graph::ValidateGraphDef(gdef, *flib_def_));
}
// 6. Add the extension.
@@ -196,7 +196,7 @@ Status SimpleGraphExecutionState::Extend(
new SimpleGraphExecutionState(&gdef, combined_options));
TF_RETURN_IF_ERROR(AddDefaultAttrsToGraphDef(
- &new_execution_state->original_graph_def_, *flib_def_.get(), 0));
+ &new_execution_state->original_graph_def_, *flib_def_, 0));
if (!session_options_->config.graph_options().place_pruned_graph()) {
// TODO(mrry): Refactor InitBaseGraph() so that we don't have to
// pass an empty BuildGraphOptions (that isn't going to be used
@@ -313,7 +313,7 @@ Status SimpleGraphExecutionState::InitBaseGraph(
CostModel costs(true /*is_global*/);
{
mutex_lock l(mu_);
- costs_.InitFromGraph(*new_graph.get());
+ costs_.InitFromGraph(*new_graph);
costs.MergeFromGlobal(costs_);
}
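Every change in this file replaces *ptr.get() with *ptr: unique_ptr::operator* returns a reference to the managed object, so dereferencing the raw pointer obtained from .get() is identical but noisier. A minimal sketch, with Consume as a hypothetical callee:

    #include <memory>
    #include <string>

    void Consume(const std::string& s) {}  // hypothetical function taking a const ref

    void Demo() {
      std::unique_ptr<std::string> s(new std::string("graph"));
      Consume(*s.get());  // dereferences the raw pointer returned by get()
      Consume(*s);        // operator* yields std::string&; same object, less noise
    }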
diff --git a/tensorflow/core/kernels/resize_bilinear_op_test.cc b/tensorflow/core/kernels/resize_bilinear_op_test.cc
index f32441bfb9..bcb78a7054 100644
--- a/tensorflow/core/kernels/resize_bilinear_op_test.cc
+++ b/tensorflow/core/kernels/resize_bilinear_op_test.cc
@@ -194,7 +194,7 @@ TEST_F(ResizeBilinearOpTest, TestBilinearRandom2x2To1x1) {
ResizeBilinearBaseline(input->tensor<float, 4>(),
expected->tensor<float, 4>());
EXPECT_EQ(input->flat<float>()(0), output->flat<float>()(0));
- test::ExpectTensorEqual<float>(*expected.get(), *output);
+ test::ExpectTensorEqual<float>(*expected, *output);
}
TEST_F(ResizeBilinearOpAlignCornersTest, TestBilinearAlignCorners2x2To1x1) {
diff --git a/tensorflow/core/kernels/xsmm_conv2d.cc b/tensorflow/core/kernels/xsmm_conv2d.cc
index 7936cbcd46..c4690eb23e 100644
--- a/tensorflow/core/kernels/xsmm_conv2d.cc
+++ b/tensorflow/core/kernels/xsmm_conv2d.cc
@@ -17,7 +17,7 @@ limitations under the License.
// libxsmm is not available.
#ifndef TENSORFLOW_USE_LIBXSMM
-void dummy_xsmm_conv2d_ensure_file_is_not_empty(void);
+void dummy_xsmm_conv2d_ensure_file_is_not_empty();
#else
#define USE_EIGEN_TENSOR
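In C, an empty parameter list leaves the parameters unspecified, so C headers spell "no arguments" as (void); in C++ the empty list already means exactly that, making the (void) form a redundant C-ism. The two declarations below are equivalent in C++:

    void dummy_one(void);  // C-style spelling, legal but redundant in C++
    void dummy_two();      // idiomatic C++: empty list means no parameters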
diff --git a/tensorflow/core/lib/io/zlib_inputstream.cc b/tensorflow/core/lib/io/zlib_inputstream.cc
index 85a1fc032f..3de157a1fc 100644
--- a/tensorflow/core/lib/io/zlib_inputstream.cc
+++ b/tensorflow/core/lib/io/zlib_inputstream.cc
@@ -37,7 +37,7 @@ ZlibInputStream::ZlibInputStream(
}
ZlibInputStream::~ZlibInputStream() {
- if (z_stream_.get()) {
+ if (z_stream_) {
inflateEnd(z_stream_.get());
}
}
diff --git a/tensorflow/core/lib/io/zlib_outputbuffer.cc b/tensorflow/core/lib/io/zlib_outputbuffer.cc
index 5901504b87..4a6bedbad8 100644
--- a/tensorflow/core/lib/io/zlib_outputbuffer.cc
+++ b/tensorflow/core/lib/io/zlib_outputbuffer.cc
@@ -36,7 +36,7 @@ ZlibOutputBuffer::ZlibOutputBuffer(
z_stream_(new z_stream) {}
ZlibOutputBuffer::~ZlibOutputBuffer() {
- if (z_stream_.get()) {
+ if (z_stream_) {
LOG(WARNING) << "ZlibOutputBuffer::Close() not called. Possible data loss";
}
}
diff --git a/tensorflow/core/lib/jpeg/jpeg_mem_unittest.cc b/tensorflow/core/lib/jpeg/jpeg_mem_unittest.cc
index 3d3050cd0d..cc8646750e 100644
--- a/tensorflow/core/lib/jpeg/jpeg_mem_unittest.cc
+++ b/tensorflow/core/lib/jpeg/jpeg_mem_unittest.cc
@@ -71,17 +71,17 @@ void TestJPEG(Env* env, const string& jpegfile) {
// Set min_acceptable_fraction to something insufficient
flags.min_acceptable_fraction = 0.8;
imgdata.reset(Uncompress(temp, fsize / 2, flags, &w, &h, &c, nullptr));
- CHECK(imgdata.get() == nullptr);
+ CHECK(imgdata == nullptr);
// Now, use a value that makes fsize/2 be enough for a black-filling
flags.min_acceptable_fraction = 0.01;
imgdata.reset(Uncompress(temp, fsize / 2, flags, &w, &h, &c, nullptr));
- CHECK(imgdata.get() != nullptr);
+ CHECK(imgdata != nullptr);
// Finally, uncompress the whole data
flags.min_acceptable_fraction = 1.0;
imgdata.reset(Uncompress(temp, fsize, flags, &w, &h, &c, nullptr));
- CHECK(imgdata.get() != nullptr);
+ CHECK(imgdata != nullptr);
}
TEST(JpegMemTest, Jpeg) {
@@ -267,7 +267,7 @@ TEST(JpegMemTest, ChromaDownsampling) {
int64 num_warnings;
std::unique_ptr<uint8[]> uncompressed(Uncompress(
jpeg.c_str(), jpeg.size(), unflags, &w, &h, &c, &num_warnings));
- CHECK(uncompressed.get() != nullptr);
+ CHECK(uncompressed != nullptr);
CHECK_EQ(num_warnings, 0);
// Recompress the JPEG with and without chroma downsampling
diff --git a/tensorflow/core/ops/training_ops_test.cc b/tensorflow/core/ops/training_ops_test.cc
index da66fbe4ba..8a77c9be15 100644
--- a/tensorflow/core/ops/training_ops_test.cc
+++ b/tensorflow/core/ops/training_ops_test.cc
@@ -32,15 +32,15 @@ static void TestGradAndIndicesErrorHandling(const ShapeInferenceTestOp& op,
// mismatch between grad[1] and var[1].
INFER_ERROR("Dimension 1 in both shapes must be equal", op,
- shape_spec("[?,1]", "[?,2];[?]").c_str());
+ shape_spec("[?,1]", "[?,2];[?]"));
// grad[0] and indices[0] must match.
INFER_ERROR("Dimensions must be equal, but are 1 and 2", op,
- shape_spec("?", "[2,?];[1]").c_str());
+ shape_spec("?", "[2,?];[1]"));
// grad is wrong rank.
- INFER_ERROR("must be equal rank", op, shape_spec("[1]", "[?,2];[?]").c_str());
+ INFER_ERROR("must be equal rank", op, shape_spec("[1]", "[?,2];[?]"));
// indices is wrong rank.
INFER_ERROR("Shape must be rank 1 but is rank 2", op,
- shape_spec("[?]", "[?];[1,2]").c_str());
+ shape_spec("[?]", "[?];[1,2]"));
}
TEST(TrainingOpsTest, ApplyGradientDescent_ShapeFn) {
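The hunks above drop .c_str() where the receiving parameter can take the std::string itself; converting to const char* only to have the callee rebuild a string forces an extra copy. Assuming the macro's argument accepts a std::string (or something constructible from one), the pattern looks like this:

    #include <string>

    void ExpectError(const std::string& spec) {}  // hypothetical consumer

    void Demo() {
      std::string spec = "[?,1];[?,2];[?]";
      ExpectError(spec.c_str());  // round trip: string -> const char* -> new string
      ExpectError(spec);          // binds the existing string directly, no copy
    }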
diff --git a/tensorflow/core/util/events_writer.cc b/tensorflow/core/util/events_writer.cc
index 3ce2c8ae0b..23b00e23dd 100644
--- a/tensorflow/core/util/events_writer.cc
+++ b/tensorflow/core/util/events_writer.cc
@@ -36,7 +36,7 @@ EventsWriter::EventsWriter(const string& file_prefix)
num_outstanding_events_(0) {}
bool EventsWriter::InitIfNeeded() {
- if (recordio_writer_.get() != nullptr) {
+ if (recordio_writer_ != nullptr) {
CHECK(!filename_.empty());
if (FileHasDisappeared()) {
// Warn user of data loss and let .reset() below do basic cleanup.
@@ -63,7 +63,7 @@ bool EventsWriter::InitIfNeeded() {
return false;
}
recordio_writer_.reset(new io::RecordWriter(recordio_file_.get()));
- if (recordio_writer_.get() == nullptr) {
+ if (recordio_writer_ == nullptr) {
LOG(ERROR) << "Could not create record writer";
return false;
}
@@ -90,7 +90,7 @@ string EventsWriter::FileName() {
}
void EventsWriter::WriteSerializedEvent(StringPiece event_str) {
- if (recordio_writer_.get() == nullptr) {
+ if (recordio_writer_ == nullptr) {
if (!InitIfNeeded()) {
LOG(ERROR) << "Write failed because file could not be opened.";
return;
@@ -110,7 +110,7 @@ void EventsWriter::WriteEvent(const Event& event) {
bool EventsWriter::Flush() {
if (num_outstanding_events_ == 0) return true;
- CHECK(recordio_file_.get() != nullptr) << "Unexpected NULL file";
+ CHECK(recordio_file_ != nullptr) << "Unexpected NULL file";
if (!recordio_writer_->Flush().ok()) {
LOG(ERROR) << "Failed to flush " << num_outstanding_events_ << " events to "
@@ -139,7 +139,7 @@ bool EventsWriter::Flush() {
bool EventsWriter::Close() {
bool return_value = Flush();
- if (recordio_file_.get() != nullptr) {
+ if (recordio_file_ != nullptr) {
Status s = recordio_file_->Close();
if (!s.ok()) {
LOG(ERROR) << "Error when closing previous event file: " << filename_
diff --git a/tensorflow/stream_executor/cuda/cuda_diagnostics.cc b/tensorflow/stream_executor/cuda/cuda_diagnostics.cc
index 01ce67252e..9ea6474934 100644
--- a/tensorflow/stream_executor/cuda/cuda_diagnostics.cc
+++ b/tensorflow/stream_executor/cuda/cuda_diagnostics.cc
@@ -369,7 +369,7 @@ port::StatusOr<DriverVersion> Diagnostician::FindKernelDriverVersion() {
LOG(INFO) << "driver version file contents: \"\"\"" << contents.begin()
<< "\"\"\"";
fclose(driver_version_file);
- return FindKernelModuleVersion(string{contents.begin()});
+ return FindKernelModuleVersion(contents.begin());
}
auto status =
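The change above removes an explicit string{...} construction: a char pointer converts to std::string implicitly at the call site, so the temporary is built either way and the explicit spelling adds only clutter. A minimal sketch with a hypothetical signature standing in for FindKernelModuleVersion:

    #include <string>

    std::string FindVersion(const std::string& contents) { return contents; }  // hypothetical

    void Demo(const char* buf) {
      FindVersion(std::string{buf});  // explicit temporary
      FindVersion(buf);               // identical implicit conversion, less clutter
    }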
diff --git a/tensorflow/tools/tfprof/internal/tfprof_op.cc b/tensorflow/tools/tfprof/internal/tfprof_op.cc
index f2342a229d..8c1bd7036e 100644
--- a/tensorflow/tools/tfprof/internal/tfprof_op.cc
+++ b/tensorflow/tools/tfprof/internal/tfprof_op.cc
@@ -177,10 +177,11 @@ string TFOp::FormatNode(OpNode* node, OpNode* root, const Options& opts) {
root->proto().total_exec_micros();
}
- attrs.push_back(strings::Printf("%30s", strings::Printf(
- "%s (%.2f%%, %.2f%%)",
- FormatTime(node->proto().exec_micros()).c_str(),
- accu_pct, pct).c_str()).c_str());
+ attrs.push_back(strings::Printf(
+ "%30s", strings::Printf("%s (%.2f%%, %.2f%%)",
+ FormatTime(node->proto().exec_micros()).c_str(),
+ accu_pct, pct)
+ .c_str()));
}
if (opts.select.find(kShown[2]) != opts.select.end()) {
@@ -192,10 +193,12 @@ string TFOp::FormatNode(OpNode* node, OpNode* root, const Options& opts) {
pct = 100.0 * node->proto().parameters() /
root->proto().total_parameters();
}
- attrs.push_back(strings::Printf("%30s", strings::Printf(
- "%s params (%.2f%%, %.2f%%)",
- FormatNumber(node->proto().parameters()).c_str(),
- accu_pct, pct).c_str()).c_str());
+ attrs.push_back(strings::Printf(
+ "%30s",
+ strings::Printf("%s params (%.2f%%, %.2f%%)",
+ FormatNumber(node->proto().parameters()).c_str(),
+ accu_pct, pct)
+ .c_str()));
}
if (opts.select.find(kShown[3]) != opts.select.end()) {
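Both hunks in this file are pure clang-format reflows of the same nested-Printf idiom: the inner strings::Printf builds the cell text, and the outer "%30s" right-aligns it in a fixed-width column. The shape of the idiom in plain C++ (values made up):

    #include <cstdio>

    int main() {
      char cell[64];
      // Inner format builds the cell text.
      std::snprintf(cell, sizeof(cell), "%s (%.2f%%, %.2f%%)", "1.23ms", 45.00, 5.00);
      // Outer "%30s" pads it on the left so stacked rows line up as a column.
      std::printf("%30s\n", cell);
      return 0;
    }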
diff --git a/tensorflow/tools/tfprof/tfprof_main.cc b/tensorflow/tools/tfprof/tfprof_main.cc
index 7d667476a4..2486c2ac28 100644
--- a/tensorflow/tools/tfprof/tfprof_main.cc
+++ b/tensorflow/tools/tfprof/tfprof_main.cc
@@ -41,7 +41,7 @@ limitations under the License.
using tensorflow::str_util::Split;
void completion(const char* buf, linenoiseCompletions* lc) {
- tensorflow::string buf_str = tensorflow::string(buf);
+ tensorflow::string buf_str = buf;
if (buf_str.find(" ") == buf_str.npos) {
for (const char* opt : tensorflow::tfprof::kCmds) {
if (tensorflow::string(opt).find(buf_str) == 0) {
@@ -246,7 +246,7 @@ int main(int argc, char** argv) {
linenoiseHistoryLoad(".tfprof_history.txt");
for (char* line = nullptr; (line = linenoise("tfprof> ")) != nullptr;) {
- tensorflow::string line_s = tensorflow::string(line);
+ tensorflow::string line_s = line;
free(line);
if (line_s.empty()) {
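Both hunks in this file replace tensorflow::string(buf) with plain copy-initialization: const char* already converts to std::string through its converting constructor, so spelling out the temporary adds nothing. A minimal sketch:

    #include <string>

    void Demo(const char* line) {
      std::string a = std::string(line);  // explicit temporary, then (elided) copy
      std::string b = line;               // same converting constructor, direct
    }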