diff options
author | Eugene Zhulenev <ezhulenev@google.com> | 2019-08-28 10:32:19 -0700 |
---|---|---|
committer | Eugene Zhulenev <ezhulenev@google.com> | 2019-08-28 10:32:19 -0700 |
commit | 6e77f9bef35012f160b307bdeae73194fde91e51 (patch) | |
tree | 25ad26d75c011883b2b553c58fff7f1b949d5af6 /unsupported | |
parent | 9aba527405b40132a308f5f782dacadf6ef50acd (diff) |
Remove shadow warnings in TensorDeviceThreadPool
Diffstat (limited to 'unsupported')
-rw-r--r-- | unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h | 10 |
1 file changed, 6 insertions, 4 deletions
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h index ca2794cb5..edb0b3e25 100644 --- a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h +++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h @@ -90,7 +90,6 @@ struct ThreadPoolDevice { // CPU cycles due to the threads competing for memory bandwidth, so we // statically schedule at most 4 block copies here. const size_t kMinBlockSize = 32768; - typedef TensorCostModel<ThreadPoolDevice> CostModel; const size_t num_threads = CostModel::numThreads(n, TensorOpCost(1.0, 1.0, 0), 4); if (n <= kMinBlockSize || num_threads < 2) { ::memcpy(dst, src, n); @@ -302,9 +301,12 @@ struct ThreadPoolDevice { // For parallelForAsync we must keep passed in closures on the heap, and // delete them only after `done` callback finished. struct ParallelForAsyncContext { - ParallelForAsyncContext(Index count, std::function<void(Index, Index)> f, - std::function<void()> done) - : count(count), f(std::move(f)), done(std::move(done)) {} + ParallelForAsyncContext(Index block_count, + std::function<void(Index, Index)> block_f, + std::function<void()> done_callback) + : count(block_count), + f(std::move(block_f)), + done(std::move(done_callback)) {} std::atomic<Index> count; std::function<void(Index, Index)> f; |