aboutsummaryrefslogtreecommitdiffhomepage
path: root/tensorflow/contrib/data
diff options
context:
space:
mode:
authorGravatar Jiri Simsa <jsimsa@google.com>2018-09-17 09:21:14 -0700
committerGravatar TensorFlower Gardener <gardener@tensorflow.org>2018-09-17 09:24:34 -0700
commitc8a0dfc741736a59f8fd1776b71f38619d66da56 (patch)
tree0a3ff87aed44e895ca7b3a09a93653f8eea7da59 /tensorflow/contrib/data
parent07bc3696135483612c727ca7687342922ff0d5de (diff)
[tf.data] Adding support for `tf.data.AUTOTUNE` as a special value for the `num_parallel_calls` argument of `tf.data.Dataset.map()`, `tf.data.Dataset.interleave()`, and `tf.contrib.data.map_and_batch()`.
When `tf.data.AUTOTUNE` is specified, the level of parallelism is determined at runtime. The underlying mechanism instruments the input pipeline to build a performance model and then uses the model to find the optimal values for the parallelism knobs. PiperOrigin-RevId: 213283297
Diffstat (limited to 'tensorflow/contrib/data')
-rw-r--r--tensorflow/contrib/data/python/kernel_tests/optimization/model_dataset_op_test.py17
1 file changed, 11 insertions, 6 deletions
diff --git a/tensorflow/contrib/data/python/kernel_tests/optimization/model_dataset_op_test.py b/tensorflow/contrib/data/python/kernel_tests/optimization/model_dataset_op_test.py
index 0a87d3e905..2b3ac85924 100644
--- a/tensorflow/contrib/data/python/kernel_tests/optimization/model_dataset_op_test.py
+++ b/tensorflow/contrib/data/python/kernel_tests/optimization/model_dataset_op_test.py
@@ -58,7 +58,8 @@ class ModelDatasetTest(test.TestCase):
dataset = dataset_ops.Dataset.from_tensors((np.random.rand(1, 4 * k),
np.random.rand(4 * k,
1))).repeat()
- dataset = dataset.map(math_ops.matmul, num_parallel_calls=56)
+ dataset = dataset.map(
+ math_ops.matmul, num_parallel_calls=optimization.AUTOTUNE)
iterator = dataset.apply(optimization.model()).make_one_shot_iterator()
get_next = iterator.get_next()
@@ -84,7 +85,9 @@ class ModelDatasetTest(test.TestCase):
1))).repeat()
dataset = dataset.apply(
batching.map_and_batch(
- math_ops.matmul, num_parallel_calls=28, batch_size=batch_size))
+ math_ops.matmul,
+ num_parallel_calls=optimization.AUTOTUNE,
+ batch_size=batch_size))
iterator = dataset.apply(optimization.model()).make_one_shot_iterator()
get_next = iterator.get_next()
@@ -109,7 +112,9 @@ class ModelDatasetTest(test.TestCase):
1))).repeat()
dataset = dataset.map(math_ops.matmul)
dataset = dataset_ops.Dataset.range(1).repeat().interleave(
- lambda _: dataset, cycle_length=56, num_parallel_calls=56)
+ lambda _: dataset,
+ cycle_length=10,
+ num_parallel_calls=optimization.AUTOTUNE)
iterator = dataset.apply(optimization.model()).make_one_shot_iterator()
get_next = iterator.get_next()
@@ -146,15 +151,15 @@ class ModelDatasetTest(test.TestCase):
x, y = c
return a, b, math_ops.matmul(x, y)
- dataset = dataset.map(f1, num_parallel_calls=32)
+ dataset = dataset.map(f1, num_parallel_calls=optimization.AUTOTUNE)
dataset = dataset_ops.Dataset.range(1).repeat().interleave(
lambda _: dataset, cycle_length=2)
- dataset = dataset.map(f2, num_parallel_calls=16)
+ dataset = dataset.map(f2, num_parallel_calls=optimization.AUTOTUNE)
dataset = dataset_ops.Dataset.range(1).repeat().interleave(
lambda _: dataset, cycle_length=2)
- dataset = dataset.map(f3, num_parallel_calls=10)
+ dataset = dataset.map(f3, num_parallel_calls=optimization.AUTOTUNE)
iterator = dataset.apply(optimization.model()).make_one_shot_iterator()
get_next = iterator.get_next()