author     A. Unique TensorFlower <gardener@tensorflow.org>  2018-01-30 10:43:03 -0800
committer  TensorFlower Gardener <gardener@tensorflow.org>   2018-01-30 12:33:54 -0800
commit     4463d105a8a4a83642b9709ba79310e8f4ddf577 (patch)
tree       240e9a0a9a6b9ad956c704776a33126ba00cbfe8 /tensorflow/contrib/gdr
parent     8f0e7207774279f4fe50f4d6c4fbd576e2941463 (diff)
Cleanup: Ran clang-format on all *.{cc,h} files in tensorflow/contrib/.../*.{hh,c}.
PiperOrigin-RevId: 183855242
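The hunks below are pure formatting changes: under the Google C++ style, clang-format wraps an over-long CHECK or LOG streaming statement by breaking right after the macro's condition and carrying the streamed operands onto an indented continuation line, instead of splitting the message mid-chain with a hanging indent. A minimal sketch of that before/after shape, using illustrative names (Illustrate, region_registered, region_name are hypothetical, not taken from this file):

#include "tensorflow/core/platform/logging.h"  // provides CHECK and LOG macros

void Illustrate(bool region_registered, const char* region_name) {
  // Before this change, the long streamed message was wrapped mid-chain:
  //   CHECK(region_registered) << "memory region is not registered for RDMA: "
  //                            << region_name;
  // After running clang-format (Google style), the break comes right after the
  // condition and the streamed operands sit on one continuation line:
  CHECK(region_registered)
      << "memory region is not registered for RDMA: " << region_name;
}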
Diffstat (limited to 'tensorflow/contrib/gdr')
-rw-r--r--  tensorflow/contrib/gdr/gdr_memory_manager.cc | 13
1 file changed, 7 insertions, 6 deletions
diff --git a/tensorflow/contrib/gdr/gdr_memory_manager.cc b/tensorflow/contrib/gdr/gdr_memory_manager.cc
index 5c7ac74428..81e70ae30a 100644
--- a/tensorflow/contrib/gdr/gdr_memory_manager.cc
+++ b/tensorflow/contrib/gdr/gdr_memory_manager.cc
@@ -86,8 +86,9 @@ int TryToReadNumaNode(ibv_device* device) {
if (strings::safe_strto32(content, &value)) {
if (value < 0) {
LOG(INFO) << "Successful NUMA node read from SysFS had negative value ("
- << value << "), but there must be at least one NUMA node"
- ", so returning NUMA node zero";
+ << value
+ << "), but there must be at least one NUMA node"
+ ", so returning NUMA node zero";
return 0;
}
LOG(INFO) << "NUMA node for device: " << device->name << " is " << value;
@@ -290,8 +291,8 @@ Status GdrMemoryManager::Init() {
// Host memory allocators
for (Allocator* allocator : allocators) {
auto* visitable_allocator = dynamic_cast<VisitableAllocator*>(allocator);
- CHECK(visitable_allocator) << "is not visitable for instrumentation"
- << allocator->Name();
+ CHECK(visitable_allocator)
+ << "is not visitable for instrumentation" << allocator->Name();
// Make sure we don't instrument the same allocator twice
if (instrumented_.find(allocator) == std::end(instrumented_)) {
visitable_allocator->AddAllocVisitor(alloc_visitor);
@@ -635,8 +636,8 @@ void GdrMemoryManager::TensorFromTransportOptions(
} else {
checksum = GPUUtil::Checksum(*tensor);
}
- CHECK(checksum == remote_mr.checksum()) << "Checksum mismatch: " << checksum
- << "!=" << remote_mr.checksum();
+ CHECK(checksum == remote_mr.checksum())
+ << "Checksum mismatch: " << checksum << "!=" << remote_mr.checksum();
#endif
}
done(Status::OK());