Diffstat (limited to 'tensorflow/contrib/batching/python/ops/batch_ops.py')
 -rw-r--r--  tensorflow/contrib/batching/python/ops/batch_ops.py | 6 ------
 1 file changed, 0 insertions(+), 6 deletions(-)
diff --git a/tensorflow/contrib/batching/python/ops/batch_ops.py b/tensorflow/contrib/batching/python/ops/batch_ops.py
index 47b80bdf4a..55faad983f 100644
--- a/tensorflow/contrib/batching/python/ops/batch_ops.py
+++ b/tensorflow/contrib/batching/python/ops/batch_ops.py
@@ -58,8 +58,6 @@ def batch_function(num_batch_threads,
                    max_batch_size,
                    batch_timeout_micros,
                    allowed_batch_sizes=None,
-                   grad_timeout_micros=60 * 1000 * 1000,
-                   unbatch_timeout_micros=60 * 1000 * 1000,
                    max_enqueued_batches=10):
   """Batches the computation done by the decorated function.

@@ -94,10 +92,6 @@ def batch_function(num_batch_threads,
       does nothing. Otherwise, supplies a list of batch sizes, causing the op
       to pad batches up to one of those sizes. The entries must increase
       monotonically, and the final entry must equal max_batch_size.
-    grad_timeout_micros: The timeout to use for the gradient. See the
-      documentation of the unbatch op for more details. Defaults to 60s.
-    unbatch_timeout_micros: The timeout to use for unbatching. See the
-      documentation of the unbatch op for more details. Defaults to 60s.
     max_enqueued_batches: The maximum depth of the batch queue. Defaults to 10.

   Returns:
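
For context, a minimal sketch of how the decorator is applied after this change, using only the parameters that remain in the signature above. The parameter values and the decorated function below are illustrative assumptions, not taken from the file; only the import path and parameter names match the module being modified.

    import tensorflow as tf
    from tensorflow.contrib.batching.python.ops import batch_ops

    # Concurrent calls to the decorated function are batched together and run
    # as one computation. Values below are illustrative; note that the final
    # entry of allowed_batch_sizes must equal max_batch_size.
    @batch_ops.batch_function(num_batch_threads=1,
                              max_batch_size=32,
                              batch_timeout_micros=100 * 1000,
                              allowed_batch_sizes=[8, 16, 32],
                              max_enqueued_batches=10)
    def matmul_fn(x):
      return tf.matmul(x, x)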