author    Asim Shankar <ashankar@google.com>    2016-09-22 11:37:01 -0800
committer TensorFlower Gardener <gardener@tensorflow.org>    2016-09-22 12:48:03 -0700
commit    2c60ffb7dce7691ff799ffcfa6578deb12822b39 (patch)
tree      9d2d913d34915372a3abdaa364b2082ba855e060
parent    ff40bc4d422f201b85c36721fb3d7ed1cf84c241 (diff)
Address a bunch of compiler warnings on the Mac.
For example:
- Unused variables/functions/typedefs
- Thread-safety annotations

Change: 133986225
-rw-r--r--  tensorflow/contrib/linear_optimizer/kernels/sdca_ops_test.cc | 7
-rw-r--r--  tensorflow/core/common_runtime/kernel_benchmark_testlib.h | 1
-rw-r--r--  tensorflow/core/distributed_runtime/base_rendezvous_mgr.cc | 1
-rw-r--r--  tensorflow/core/distributed_runtime/base_rendezvous_mgr.h | 1
-rw-r--r--  tensorflow/core/distributed_runtime/rpc/grpc_remote_worker.cc | 1
-rw-r--r--  tensorflow/core/distributed_runtime/rpc/grpc_worker_service.h | 2
-rw-r--r--  tensorflow/core/kernels/cwise_ops_common.cc | 14
-rw-r--r--  tensorflow/core/kernels/debug_ops.h | 13
-rw-r--r--  tensorflow/core/kernels/fractional_avg_pool_op.cc | 2
-rw-r--r--  tensorflow/core/kernels/tensor_array.cc | 2
10 files changed, 8 insertions, 36 deletions
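
As the commit message notes, most of the hunks below delete unused private fields, file-static functions, and local typedefs. For reference, a minimal sketch of the kind of code Clang flags on a macOS build (names are illustrative, not taken from this change); compiling it with clang++ -c -Wall should produce both warnings shown in the comments:

    // Hypothetical example only.
    namespace {
    // warning: unused function 'UnusedHelper' [-Wunused-function]
    int UnusedHelper(int x) { return x + 1; }
    }  // namespace

    class Worker {
     public:
      explicit Worker(bool retry) : retry_(retry) {}
     private:
      // warning: private field 'retry_' is not used [-Wunused-private-field]
      bool retry_;
    };

Removing the unused entities, as the diffs below do, silences these warnings.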
diff --git a/tensorflow/contrib/linear_optimizer/kernels/sdca_ops_test.cc b/tensorflow/contrib/linear_optimizer/kernels/sdca_ops_test.cc
index 89e56ac808..dcdc9ad1cf 100644
--- a/tensorflow/contrib/linear_optimizer/kernels/sdca_ops_test.cc
+++ b/tensorflow/contrib/linear_optimizer/kernels/sdca_ops_test.cc
@@ -77,13 +77,6 @@ Node* Ones(Graph* const g, const int n) {
return test::graph::Constant(g, data);
}
-Node* StringIota(Graph* const g, const int n) {
- Tensor data(DT_STRING, TensorShape({n}));
- test::FillFn<string>(
- &data, [](const int i) { return strings::StrCat(strings::Hex(i)); });
- return test::graph::Constant(g, data);
-}
-
Node* SparseExampleIndices(Graph* const g, const int sparse_features_per_group,
const int num_examples) {
const int x_size = num_examples * 4;
diff --git a/tensorflow/core/common_runtime/kernel_benchmark_testlib.h b/tensorflow/core/common_runtime/kernel_benchmark_testlib.h
index a590763829..278a6b3f9f 100644
--- a/tensorflow/core/common_runtime/kernel_benchmark_testlib.h
+++ b/tensorflow/core/common_runtime/kernel_benchmark_testlib.h
@@ -54,7 +54,6 @@ class Benchmark {
private:
thread::ThreadPool* pool_ = nullptr;
- thread::ThreadPool* non_blocking_pool_ = nullptr;
Device* device_ = nullptr;
Rendezvous* rendez_ = nullptr;
Executor* exec_ = nullptr;
diff --git a/tensorflow/core/distributed_runtime/base_rendezvous_mgr.cc b/tensorflow/core/distributed_runtime/base_rendezvous_mgr.cc
index e12646325e..30c260d7d1 100644
--- a/tensorflow/core/distributed_runtime/base_rendezvous_mgr.cc
+++ b/tensorflow/core/distributed_runtime/base_rendezvous_mgr.cc
@@ -130,7 +130,6 @@ BaseRemoteRendezvous::BaseRemoteRendezvous(const WorkerEnv* env, int64 step_id,
bool tolerate_dup_recv)
: env_(env),
step_id_(step_id),
- tolerate_dup_recv_(tolerate_dup_recv),
local_(NewLocalRendezvous(tolerate_dup_recv)) {}
BaseRemoteRendezvous::~BaseRemoteRendezvous() {
diff --git a/tensorflow/core/distributed_runtime/base_rendezvous_mgr.h b/tensorflow/core/distributed_runtime/base_rendezvous_mgr.h
index b208c0f874..2d939f12f2 100644
--- a/tensorflow/core/distributed_runtime/base_rendezvous_mgr.h
+++ b/tensorflow/core/distributed_runtime/base_rendezvous_mgr.h
@@ -162,7 +162,6 @@ class BaseRemoteRendezvous : public Rendezvous {
const int64 step_id_;
private:
- const bool tolerate_dup_recv_;
Rendezvous* local_; // Owns a Ref on this object.
mutable mutex mu_;
diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_remote_worker.cc b/tensorflow/core/distributed_runtime/rpc/grpc_remote_worker.cc
index 60597aab26..657113e01e 100644
--- a/tensorflow/core/distributed_runtime/rpc/grpc_remote_worker.cc
+++ b/tensorflow/core/distributed_runtime/rpc/grpc_remote_worker.cc
@@ -250,7 +250,6 @@ class GrpcRemoteWorker : public WorkerInterface {
// Support for logging.
WorkerCacheLogger* logger_;
- bool retry_unavailable_;
TF_DISALLOW_COPY_AND_ASSIGN(GrpcRemoteWorker);
};
diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_worker_service.h b/tensorflow/core/distributed_runtime/rpc/grpc_worker_service.h
index 4015eef07b..b6b05b7c30 100644
--- a/tensorflow/core/distributed_runtime/rpc/grpc_worker_service.h
+++ b/tensorflow/core/distributed_runtime/rpc/grpc_worker_service.h
@@ -23,7 +23,7 @@ class ServerBuilder;
namespace tensorflow {
class AsyncServiceInterface;
-class WorkerEnv;
+struct WorkerEnv;
// Returns an implementation of WorkerService rpc service.
AsyncServiceInterface* NewGrpcWorkerService(WorkerEnv* env,
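
The one-line change above swaps class for struct in the WorkerEnv forward declaration so it matches how the type is defined; Clang's -Wmismatched-tags warns whenever a type declared with one class-key is redeclared or defined with the other. A minimal sketch with a hypothetical type name:

    // Hypothetical example: clang++ -c -Wall -Wmismatched-tags
    class Env;               // forward declaration uses 'class' ...
    struct Env { int id; };  // ... but the definition uses 'struct':
                             // warning: 'Env' defined as a struct here but
                             // previously declared as a class [-Wmismatched-tags]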
diff --git a/tensorflow/core/kernels/cwise_ops_common.cc b/tensorflow/core/kernels/cwise_ops_common.cc
index 4e37f585fb..79bb223ccb 100644
--- a/tensorflow/core/kernels/cwise_ops_common.cc
+++ b/tensorflow/core/kernels/cwise_ops_common.cc
@@ -45,20 +45,6 @@ void BinaryOpShared::SetComputeError(OpKernelContext* ctx) {
}
}
-static BCast::Vec FromShape(const TensorShape& shape) {
- const int N = shape.dims();
- BCast::Vec ret(N);
- for (int i = 0; i < N; ++i) {
- ret[i] = shape.dim_size(i);
- }
- return ret;
-}
-
-static TensorShape ToShape(const BCast::Vec& vec) {
- TensorShape shape(vec);
- return shape;
-}
-
BinaryOpShared::BinaryOpState::BinaryOpState(OpKernelContext* ctx)
: in0(ctx->input(0)),
in1(ctx->input(1)),
diff --git a/tensorflow/core/kernels/debug_ops.h b/tensorflow/core/kernels/debug_ops.h
index 95c0e49b9c..d916ca4996 100644
--- a/tensorflow/core/kernels/debug_ops.h
+++ b/tensorflow/core/kernels/debug_ops.h
@@ -38,13 +38,6 @@ class CopyOp : public OpKernel {
void Compute(OpKernelContext* context) override {
const Tensor& src_tensor = context->input(0);
- DeviceContext* device_ctxt = context->op_device_context();
- Device* device = static_cast<Device*>(context->device());
-
- // Determine if the input tensor is not on CPU (e.g., on GPU).
- bool off_host_input = device->device_type() == DEVICE_GPU &&
- !context->input_alloc_attr(0).on_host();
-
if (src_tensor.IsInitialized()) {
// Source tensor is initialized. Make a copy.
Tensor* copied_tensor;
@@ -52,7 +45,13 @@ class CopyOp : public OpKernel {
&copied_tensor));
#if GOOGLE_CUDA
+ Device* device = static_cast<Device*>(context->device());
+ // Determine if the input tensor is not on CPU (e.g., on GPU).
+ bool off_host_input = device->device_type() == DEVICE_GPU &&
+ !context->input_alloc_attr(0).on_host();
+
if (off_host_input) {
+ DeviceContext* device_ctxt = context->op_device_context();
// Input is not on host: deep-copy it from GPU to the same GPU.
Notification done_copy;
GPUUtil::CopyGPUTensorToSameGPU(
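
In the debug_ops.h hunk above, the device lookup and the off_host_input flag move inside the #if GOOGLE_CUDA block: their only consumer is the GPU copy path, so in a CPU-only build Clang reports them as unused variables. A minimal sketch of the pattern, with a hypothetical helper standing in for the real device query:

    // Hypothetical example; DeviceIsGpu() is a placeholder for the real check.
    inline bool DeviceIsGpu() { return false; }

    void MaybeCopy(bool* used_gpu_path) {
      *used_gpu_path = false;
    #if GOOGLE_CUDA
      // Declared inside the #if so that CPU-only builds, where the only use of
      // this flag is compiled out, do not emit -Wunused-variable for it.
      const bool off_host_input = DeviceIsGpu();
      if (off_host_input) {
        *used_gpu_path = true;
      }
    #endif
    }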
diff --git a/tensorflow/core/kernels/fractional_avg_pool_op.cc b/tensorflow/core/kernels/fractional_avg_pool_op.cc
index a983d9362c..9bba6712a2 100644
--- a/tensorflow/core/kernels/fractional_avg_pool_op.cc
+++ b/tensorflow/core/kernels/fractional_avg_pool_op.cc
@@ -235,8 +235,6 @@ class FractionalAvgPoolGradOp : public OpKernel {
// tensor of double type. And cast it to the corresponding type.
typedef Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>>
ConstEigenMatrixMap;
- typedef Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>>
- EigenMatrixMap;
typedef Eigen::Map<Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>>
EigenDoubleMatrixMap;
diff --git a/tensorflow/core/kernels/tensor_array.cc b/tensorflow/core/kernels/tensor_array.cc
index dc1b14ec36..ad3f7cb1e5 100644
--- a/tensorflow/core/kernels/tensor_array.cc
+++ b/tensorflow/core/kernels/tensor_array.cc
@@ -79,7 +79,7 @@ std::atomic<int64> TensorArray::tensor_array_counter{0};
Status TensorArray::CopyShapesFrom(TensorArray* rhs) {
mutex_lock l(mu_);
- mutex_lock l_rhs(*rhs->mu());
+ mutex_lock l_rhs(rhs->mu_);
TF_RETURN_IF_ERROR(LockedReturnIfClosed());
TF_RETURN_IF_ERROR(rhs->LockedReturnIfClosed());
if (tensors_.size() != rhs->tensors_.size()) {
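
The tensor_array.cc change is the thread-safety-annotation part of the commit: Clang's analysis (-Wthread-safety) tracks which locks are held by matching lock expressions, so acquiring the annotated member rhs->mu_ directly lets it prove that rhs's guarded members are accessed under the right mutex, whereas locking through the accessor expression *rhs->mu() likely left accesses such as rhs->tensors_ looking unguarded. A minimal, self-contained sketch using the raw Clang attributes rather than TensorFlow's mutex macros (hypothetical names):

    // Hypothetical example: clang++ -c -Wthread-safety
    class __attribute__((capability("mutex"))) Mutex {
     public:
      void Lock() __attribute__((acquire_capability())) {}
      void Unlock() __attribute__((release_capability())) {}
    };

    class Array {
     public:
      int CombinedSize(Array* rhs) {
        mu_.Lock();
        // Locking the annotated member itself lets the analysis see that
        // rhs->size_ below is read with rhs->mu_ held; locking through a
        // pointer returned by an accessor is a different expression and would
        // draw a -Wthread-safety-analysis warning instead.
        rhs->mu_.Lock();
        const int n = size_ + rhs->size_;
        rhs->mu_.Unlock();
        mu_.Unlock();
        return n;
      }
     private:
      Mutex mu_;
      int size_ __attribute__((guarded_by(mu_))) = 0;
    };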