author     A. Unique TensorFlower <gardener@tensorflow.org>  2017-05-31 13:45:15 -0700
committer  TensorFlower Gardener <gardener@tensorflow.org>   2017-05-31 13:49:26 -0700
commit     6882effb863dcd0da00d3287959deac46734a0b2 (patch)
tree       75364d9f41109003d318dd1e50a73988e9229d5d /tensorflow/contrib/nccl
parent     0b8070253d6c62ad395a42c3f496c3f21ae5d975 (diff)
Make single-parameter constructors explicit
PiperOrigin-RevId: 157628970
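
For readers unfamiliar with the motivation: marking a single-parameter constructor `explicit` stops the compiler from using it for implicit conversions, so a value such as a std::vector or an OpKernelConstruction* can no longer be silently turned into the class type at a call site. The following is a minimal sketch of the conversion this kind of change rules out; the Widget and PrintCount names are hypothetical and not part of this commit or the TensorFlow sources.

// Hypothetical illustration of why single-parameter constructors are made explicit.
#include <cstdio>
#include <vector>

struct Widget {
  // With `explicit`, a std::vector<int> no longer converts to Widget silently.
  explicit Widget(std::vector<int> members)
      : num_members(static_cast<int>(members.size())) {}
  int num_members;
};

void PrintCount(const Widget& w) { std::printf("%d members\n", w.num_members); }

int main() {
  std::vector<int> members{1, 2, 3};
  // PrintCount(members);       // would compile without `explicit`; now a compile error
  PrintCount(Widget(members));  // the conversion must be written out
  return 0;
}

The same reasoning applies to the constructors changed below: callers must construct the kernel or communicator object deliberately rather than relying on an accidental implicit conversion.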
Diffstat (limited to 'tensorflow/contrib/nccl')
-rw-r--r--  tensorflow/contrib/nccl/kernels/nccl_manager.cc |  2
-rw-r--r--  tensorflow/contrib/nccl/kernels/nccl_ops.cc     | 10
2 files changed, 7 insertions, 5 deletions
diff --git a/tensorflow/contrib/nccl/kernels/nccl_manager.cc b/tensorflow/contrib/nccl/kernels/nccl_manager.cc
index b289c91bb8..42e7789301 100644
--- a/tensorflow/contrib/nccl/kernels/nccl_manager.cc
+++ b/tensorflow/contrib/nccl/kernels/nccl_manager.cc
@@ -66,7 +66,7 @@ struct NcclManager::CommunicatorMember {
struct NcclManager::Communicator {
public:
- Communicator(std::vector<CommunicatorMember> members)
+ explicit Communicator(std::vector<CommunicatorMember> members)
: num_devices(members.size()), members(std::move(members)) {}
const int num_devices;
diff --git a/tensorflow/contrib/nccl/kernels/nccl_ops.cc b/tensorflow/contrib/nccl/kernels/nccl_ops.cc
index b63ab5d611..3c532e3d73 100644
--- a/tensorflow/contrib/nccl/kernels/nccl_ops.cc
+++ b/tensorflow/contrib/nccl/kernels/nccl_ops.cc
@@ -38,7 +38,7 @@ namespace tensorflow {
// when the async op kernel's done callback is called.
class NcclAsyncOpBase : public AsyncOpKernel {
public:
- NcclAsyncOpBase(OpKernelConstruction* c) : AsyncOpKernel(c) {
+ explicit NcclAsyncOpBase(OpKernelConstruction* c) : AsyncOpKernel(c) {
OP_REQUIRES_OK(c, c->GetAttr("num_devices", &num_devices_));
OP_REQUIRES_OK(c, c->GetAttr("shared_name", &collective_prefix_));
}
@@ -62,7 +62,7 @@ class NcclAsyncOpBase : public AsyncOpKernel {
// <k> devices in the communicator.
class NcclAllReduceOpKernel : public NcclAsyncOpBase {
public:
- NcclAllReduceOpKernel(OpKernelConstruction* c) : NcclAsyncOpBase(c) {
+ explicit NcclAllReduceOpKernel(OpKernelConstruction* c) : NcclAsyncOpBase(c) {
string reduction;
OP_REQUIRES_OK(c, c->GetAttr("reduction", &reduction));
if (reduction == "min") {
@@ -106,7 +106,8 @@ REGISTER_KERNEL_BUILDER(Name("NcclAllReduce").Device(DEVICE_GPU),
class NcclBroadcastSendKernel : public NcclAsyncOpBase {
public:
- NcclBroadcastSendKernel(OpKernelConstruction* c) : NcclAsyncOpBase(c) {}
+ explicit NcclBroadcastSendKernel(OpKernelConstruction* c)
+ : NcclAsyncOpBase(c) {}
void ComputeAsync(OpKernelContext* c, DoneCallback done) override {
auto actual_done = [c, done](Status s) {
@@ -127,7 +128,8 @@ REGISTER_KERNEL_BUILDER(Name("NcclBroadcastSend").Device(DEVICE_GPU),
class NcclBroadcastRecvKernel : public NcclAsyncOpBase {
public:
- NcclBroadcastRecvKernel(OpKernelConstruction* c) : NcclAsyncOpBase(c) {}
+ explicit NcclBroadcastRecvKernel(OpKernelConstruction* c)
+ : NcclAsyncOpBase(c) {}
void ComputeAsync(OpKernelContext* c, DoneCallback done) override {
const Tensor& shape_t = c->input(0);