path: root/tensorflow/contrib/rnn/kernels
author    A. Unique TensorFlower <gardener@tensorflow.org>    2017-10-24 13:48:27 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>     2017-10-24 13:51:58 -0700
commit    73f8b044ea7333b25ef5c9841c1e072e45ad5890 (patch)
tree      296ec77fb7e2ea15fc29208b240e29541026c14f /tensorflow/contrib/rnn/kernels
parent    03f1105003c7e30127ed9449524c36d2c384b79c (diff)
Replace min with std::min to avoid issues with clang compilation
Diffstat (limited to 'tensorflow/contrib/rnn/kernels')
-rw-r--r--    tensorflow/contrib/rnn/kernels/lstm_ops_gpu.cu.cc    4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/tensorflow/contrib/rnn/kernels/lstm_ops_gpu.cu.cc b/tensorflow/contrib/rnn/kernels/lstm_ops_gpu.cu.cc
index d82676ff7e..6d3758fef1 100644
--- a/tensorflow/contrib/rnn/kernels/lstm_ops_gpu.cu.cc
+++ b/tensorflow/contrib/rnn/kernels/lstm_ops_gpu.cu.cc
@@ -209,7 +209,7 @@ void LSTMBlockCellFpropWithCUDA(
// Use 2D blocks. The number of threads per block is equal to x * y, where x =
// min(batch_size, 8) and y = 32. See above for guidance on number of
// threads.
- dim3 block_dim_2d(min(batch_size, 8), 32);
+ dim3 block_dim_2d(std::min(batch_size, 8), 32);
dim3 grid_dim_2d(Eigen::divup(batch_size, static_cast<int>(block_dim_2d.x)),
Eigen::divup(cell_size, static_cast<int>(block_dim_2d.y)));
@@ -323,7 +323,7 @@ void LSTMBlockCellBpropWithCUDA(
const bool use_peephole) {
const cudaStream_t& cu_stream = GetCudaStream(ctx);
- dim3 block_dim_2d(min(batch_size, 8), 32);
+ dim3 block_dim_2d(std::min(batch_size, 8), 32);
dim3 grid_dim_2d(Eigen::divup(batch_size, static_cast<int>(block_dim_2d.x)),
Eigen::divup(cell_size, static_cast<int>(block_dim_2d.y)));
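
The change is a two-character spelling fix, but the motivation is worth spelling out: when this .cu.cc file is built with nvcc, an unqualified min(int, int) can resolve against the global overload the CUDA headers inject, whereas clang's CUDA compilation does not reliably provide that overload on the host path, so the call may fail to resolve. Qualifying it as std::min (declared in <algorithm>) compiles under both toolchains. Below is a minimal, hypothetical sketch of the pattern, using a stand-in Dim2 struct instead of CUDA's dim3 so it builds as plain host C++; the names are illustrative and not taken from the patch.

    #include <algorithm>  // std::min -- the qualified overload used by the patch

    // Stand-in for CUDA's dim3 so the sketch compiles as ordinary host C++.
    struct Dim2 {
      int x;
      int y;
    };

    // Mirrors the launch-configuration logic around the changed lines:
    // x = min(batch_size, 8) threads, y = 32 threads per 2D block.
    Dim2 BlockDim2D(int batch_size) {
      // The unqualified min(batch_size, 8) relied on a compiler-provided global
      // overload; std::min is the portable, explicitly qualified spelling.
      return Dim2{std::min(batch_size, 8), 32};
    }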