diff options
author | 2018-09-29 23:29:28 -0700 | |
---|---|---|
committer | 2018-09-29 23:32:51 -0700 | |
commit | e0da6256cd116d17057374594f2fc191cf201f42 (patch) | |
tree | b149c385016d67d02daa7ed117bee44fbda65de1 | |
parent | d936d819752916d3122f02def571ecac9e995029 (diff) |
Fixed format errors reported by clang-format
-rw-r--r-- | tensorflow/core/common_runtime/process_util.cc | 11 | ||||
-rw-r--r-- | tensorflow/core/common_runtime/threadpool_device.cc | 6 | ||||
-rw-r--r-- | tensorflow/core/util/util.cc | 8 | ||||
-rw-r--r-- | tensorflow/core/util/util.h | 2 |
4 files changed, 12 insertions, 15 deletions
diff --git a/tensorflow/core/common_runtime/process_util.cc b/tensorflow/core/common_runtime/process_util.cc index b3064a4c08..4570496637 100644 --- a/tensorflow/core/common_runtime/process_util.cc +++ b/tensorflow/core/common_runtime/process_util.cc @@ -62,15 +62,16 @@ int32 NumInterOpThreadsFromSessionOptions(const SessionOptions& options) { // Set inter_op conservatively to avoid thread oversubscription that could // lead to severe perf degradations and OMP resource exhaustion int mkl_intra_op = 1; - #ifdef _OPENMP +#ifdef _OPENMP mkl_intra_op = omp_get_max_threads(); - #endif // _OPENMP +#endif // _OPENMP CHECK_GE(mkl_intra_op, 1); const int32 mkl_inter_op = std::max( (port::NumSchedulableCPUs() + mkl_intra_op - 1) / mkl_intra_op, 2); - VLOG(0) << "Creating new thread pool with default inter op setting: " - << mkl_inter_op - << ". Tune using inter_op_parallelism_threads for best performance."; + VLOG(0) + << "Creating new thread pool with default inter op setting: " + << mkl_inter_op + << ". Tune using inter_op_parallelism_threads for best performance."; return mkl_inter_op; } #endif // INTEL_MKL diff --git a/tensorflow/core/common_runtime/threadpool_device.cc b/tensorflow/core/common_runtime/threadpool_device.cc index f188016610..6404d8bc6a 100644 --- a/tensorflow/core/common_runtime/threadpool_device.cc +++ b/tensorflow/core/common_runtime/threadpool_device.cc @@ -51,8 +51,7 @@ ThreadPoolDevice::ThreadPoolDevice(const SessionOptions& options, scoped_allocator_mgr_(new ScopedAllocatorMgr(name)) { #ifdef INTEL_MKL // Early return when MKL is disabled - if (DisableMKL()) - return; + if (DisableMKL()) return; #ifdef _OPENMP const char* user_omp_threads = getenv("OMP_NUM_THREADS"); if (user_omp_threads == nullptr) { @@ -118,7 +117,8 @@ class MklCPUAllocatorFactory : public AllocatorFactory { }; #ifdef ENABLE_MKL -REGISTER_MEM_ALLOCATOR("MklCPUAllocator", (DisableMKL() ? 50 : 200), MklCPUAllocatorFactory); +REGISTER_MEM_ALLOCATOR("MklCPUAllocator", (DisableMKL() ? 50 : 200), + MklCPUAllocatorFactory); #endif // ENABLE_MKL } // namespace diff --git a/tensorflow/core/util/util.cc b/tensorflow/core/util/util.cc index 44d5becb9c..489999d1e8 100644 --- a/tensorflow/core/util/util.cc +++ b/tensorflow/core/util/util.cc @@ -122,11 +122,7 @@ string SliceDebugString(const TensorShape& shape, const int64 flat) { #ifdef INTEL_MKL bool DisableMKL() { - enum MklStatus { - MKL_DEFAULT = 0, - MKL_ON = 1, - MKL_OFF = 2 - }; + enum MklStatus { MKL_DEFAULT = 0, MKL_ON = 1, MKL_OFF = 2 }; static MklStatus status = MKL_DEFAULT; if (status == MKL_DEFAULT) { char* tf_disable_mkl = getenv("TF_DISABLE_MKL"); @@ -139,5 +135,5 @@ bool DisableMKL() { } return status == MKL_OFF ? true : false; } -#endif +#endif // INTEL_MKL } // namespace tensorflow diff --git a/tensorflow/core/util/util.h b/tensorflow/core/util/util.h index ba90ad52c2..4aa47aa48a 100644 --- a/tensorflow/core/util/util.h +++ b/tensorflow/core/util/util.h @@ -59,7 +59,7 @@ string SliceDebugString(const TensorShape& shape, const int64 flat); // disable MKL in runtime #ifdef INTEL_MKL bool DisableMKL(); -#endif +#endif // INTEL_MKL } // namespace tensorflow |