author     Jiri Simsa <jsimsa@google.com>  2018-10-01 17:26:37 -0700
committer  TensorFlower Gardener <gardener@tensorflow.org>  2018-10-01 17:31:54 -0700
commit     9a23e9251ecba026471ff77a5bbbc802a2889a10 (patch)
tree       b3cdc0a120ac823a91fbb8a8b68e0846f239885e /tensorflow
parent     ea5c529ed7b7e17d1e66bc7cf4479d232ed0a896 (diff)
[tf.data] Adding `tf.data.Options()`, `tf.data.Dataset.options()`, and `tf.data.Dataset.with_options()`, which make it possible to respectively represent, get, and set options (such as the optimization configuration) of a tf.data input pipeline.
PiperOrigin-RevId: 215310764
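
A minimal usage sketch of the new API (graph-mode TF 1.x; the option names are those introduced in the diff below):

    import tensorflow as tf

    options = tf.data.Options()
    options.experimental_map_and_batch_fusion = True  # opt into a static graph rewrite
    options.experimental_autotune = True              # opt into dynamic performance modeling

    dataset = tf.data.Dataset.range(10).map(lambda x: x * x).batch(10)
    dataset = dataset.with_options(options)  # options apply to the whole input pipeline
    assert dataset.options().experimental_autotune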
Diffstat (limited to 'tensorflow')
-rw-r--r--  tensorflow/core/kernels/data/optimize_dataset_op.cc | 16
-rw-r--r--  tensorflow/python/data/experimental/kernel_tests/optimization/hoist_random_uniform_test.py | 11
-rw-r--r--  tensorflow/python/data/experimental/kernel_tests/optimization/latency_all_edges_test.py | 7
-rw-r--r--  tensorflow/python/data/experimental/kernel_tests/optimization/map_and_filter_fusion_test.py | 27
-rw-r--r--  tensorflow/python/data/experimental/kernel_tests/optimization/map_parallelization_test.py | 6
-rw-r--r--  tensorflow/python/data/experimental/kernel_tests/optimization/map_vectorization_test.py | 14
-rw-r--r--  tensorflow/python/data/experimental/kernel_tests/optimization/model_dataset_op_test.py | 20
-rw-r--r--  tensorflow/python/data/experimental/kernel_tests/optimization/noop_elimination_test.py | 4
-rw-r--r--  tensorflow/python/data/experimental/kernel_tests/optimization/optimize_dataset_op_test.py | 45
-rw-r--r--  tensorflow/python/data/experimental/ops/optimization.py | 61
-rw-r--r--  tensorflow/python/data/kernel_tests/BUILD | 18
-rw-r--r--  tensorflow/python/data/kernel_tests/dataset_ops_test.py | 158
-rw-r--r--  tensorflow/python/data/ops/dataset_ops.py | 268
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.data.-dataset.pbtxt | 8
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.data.-fixed-length-record-dataset.pbtxt | 8
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.data.-options.pbtxt | 57
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.data.-t-f-record-dataset.pbtxt | 8
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.data.-text-line-dataset.pbtxt | 8
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.data.experimental.-csv-dataset.pbtxt | 8
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.data.experimental.-random-dataset.pbtxt | 8
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.data.experimental.-sql-dataset.pbtxt | 8
-rw-r--r--  tensorflow/tools/api/golden/v1/tensorflow.data.pbtxt | 4
-rw-r--r--  tensorflow/tools/api/golden/v2/tensorflow.data.-dataset.pbtxt | 8
-rw-r--r--  tensorflow/tools/api/golden/v2/tensorflow.data.-fixed-length-record-dataset.pbtxt | 8
-rw-r--r--  tensorflow/tools/api/golden/v2/tensorflow.data.-options.pbtxt | 57
-rw-r--r--  tensorflow/tools/api/golden/v2/tensorflow.data.-t-f-record-dataset.pbtxt | 8
-rw-r--r--  tensorflow/tools/api/golden/v2/tensorflow.data.-text-line-dataset.pbtxt | 8
-rw-r--r--  tensorflow/tools/api/golden/v2/tensorflow.data.experimental.-csv-dataset.pbtxt | 8
-rw-r--r--  tensorflow/tools/api/golden/v2/tensorflow.data.experimental.-random-dataset.pbtxt | 8
-rw-r--r--  tensorflow/tools/api/golden/v2/tensorflow.data.experimental.-sql-dataset.pbtxt | 8
-rw-r--r--  tensorflow/tools/api/golden/v2/tensorflow.data.pbtxt | 4
31 files changed, 742 insertions(+), 147 deletions(-)
diff --git a/tensorflow/core/kernels/data/optimize_dataset_op.cc b/tensorflow/core/kernels/data/optimize_dataset_op.cc
index d5b725eac9..1cb7caa738 100644
--- a/tensorflow/core/kernels/data/optimize_dataset_op.cc
+++ b/tensorflow/core/kernels/data/optimize_dataset_op.cc
@@ -154,12 +154,8 @@ class OptimizeDatasetOp : public UnaryDatasetOpKernel {
: DatasetIterator<Dataset>(params) {}
Status Initialize(IteratorContext* ctx) override {
- IteratorContext::Params params;
- params.env = ctx->env();
- params.runner = *(ctx->runner());
- params.stats_aggregator_getter = ctx->stats_aggregator_getter();
+ IteratorContext::Params params = ctx->params();
params.lib = dataset()->lib_;
- params.allocator_getter = ctx->allocator_getter();
return dataset()->optimized_input_->MakeIterator(
IteratorContext(params), prefix(), &input_impl_);
}
@@ -167,14 +163,10 @@ class OptimizeDatasetOp : public UnaryDatasetOpKernel {
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
- IteratorContext::Params params;
- params.env = ctx->env();
- params.runner = *(ctx->runner());
- params.stats_aggregator_getter = ctx->stats_aggregator_getter();
+ IteratorContext::Params params = ctx->params();
params.lib = dataset()->lib_;
- params.allocator_getter = ctx->allocator_getter();
- IteratorContext iter_ctx(params);
- return input_impl_->GetNext(&iter_ctx, out_tensors, end_of_sequence);
+ return input_impl_->GetNext(IteratorContext(params), out_tensors,
+ end_of_sequence);
}
protected:
diff --git a/tensorflow/python/data/experimental/kernel_tests/optimization/hoist_random_uniform_test.py b/tensorflow/python/data/experimental/kernel_tests/optimization/hoist_random_uniform_test.py
index 3cd9753665..81437c0aec 100644
--- a/tensorflow/python/data/experimental/kernel_tests/optimization/hoist_random_uniform_test.py
+++ b/tensorflow/python/data/experimental/kernel_tests/optimization/hoist_random_uniform_test.py
@@ -64,7 +64,9 @@ class HoistRandomUniformTest(test_base.DatasetTestBase, parameterized.TestCase):
optimization.assert_next(
["Zip[0]", "Map"] if will_optimize else ["Map"])).map(function)
- dataset = dataset.apply(optimization.optimize(["hoist_random_uniform"]))
+ options = dataset_ops.Options()
+ options.experimental_hoist_random_uniform = True
+ dataset = dataset.with_options(options)
self._testDataset(dataset)
def testAdditionalInputs(self):
@@ -77,9 +79,10 @@ class HoistRandomUniformTest(test_base.DatasetTestBase, parameterized.TestCase):
[], minval=1, maxval=10, dtype=dtypes.float32, seed=42)
dataset = dataset_ops.Dataset.range(5).apply(
- optimization.assert_next(
- ["Zip[0]", "Map"])).map(random_with_capture).apply(
- optimization.optimize(["hoist_random_uniform"]))
+ optimization.assert_next(["Zip[0]", "Map"])).map(random_with_capture)
+ options = dataset_ops.Options()
+ options.experimental_hoist_random_uniform = True
+ dataset = dataset.with_options(options)
self._testDataset(dataset)
def _testDataset(self, dataset):
diff --git a/tensorflow/python/data/experimental/kernel_tests/optimization/latency_all_edges_test.py b/tensorflow/python/data/experimental/kernel_tests/optimization/latency_all_edges_test.py
index 45623876ae..26fec0414e 100644
--- a/tensorflow/python/data/experimental/kernel_tests/optimization/latency_all_edges_test.py
+++ b/tensorflow/python/data/experimental/kernel_tests/optimization/latency_all_edges_test.py
@@ -28,14 +28,15 @@ from tensorflow.python.platform import test
class OptimizeStatsDatasetTest(stats_dataset_test_base.StatsDatasetTestBase):
def testLatencyStatsOptimization(self):
-
stats_aggregator = stats_ops.StatsAggregator()
dataset = dataset_ops.Dataset.from_tensors(1).apply(
optimization.assert_next(
["LatencyStats", "Map", "LatencyStats", "Prefetch",
"LatencyStats"])).map(lambda x: x * x).prefetch(1).apply(
- stats_ops.set_stats_aggregator(stats_aggregator)).apply(
- optimization.optimize(["latency_all_edges"]))
+ stats_ops.set_stats_aggregator(stats_aggregator))
+ options = dataset_ops.Options()
+ options.experimental_latency_all_edges = True
+ dataset = dataset.with_options(options)
iterator = dataset.make_initializable_iterator()
get_next = iterator.get_next()
summary_t = stats_aggregator.get_summary()
diff --git a/tensorflow/python/data/experimental/kernel_tests/optimization/map_and_filter_fusion_test.py b/tensorflow/python/data/experimental/kernel_tests/optimization/map_and_filter_fusion_test.py
index a439635716..7f8a4e6406 100644
--- a/tensorflow/python/data/experimental/kernel_tests/optimization/map_and_filter_fusion_test.py
+++ b/tensorflow/python/data/experimental/kernel_tests/optimization/map_and_filter_fusion_test.py
@@ -72,7 +72,10 @@ class MapAndFilterFusionTest(test_base.DatasetTestBase, parameterized.TestCase):
for function in functions:
dataset = dataset.map(function)
- dataset = dataset.prefetch(0).apply(optimization.optimize(["map_fusion"]))
+ dataset = dataset.prefetch(0)
+ options = dataset_ops.Options()
+ options.experimental_map_fusion = True
+ dataset = dataset.with_options(options)
iterator = dataset.make_one_shot_iterator()
get_next = iterator.get_next()
with self.cached_session() as sess:
@@ -124,9 +127,10 @@ class MapAndFilterFusionTest(test_base.DatasetTestBase, parameterized.TestCase):
def testMapFilterFusion(self, function, predicate):
dataset = dataset_ops.Dataset.range(10).apply(
optimization.assert_next(
- ["Map",
- "FilterByLastComponent"])).map(function).filter(predicate).apply(
- optimization.optimize(["map_and_filter_fusion"]))
+ ["Map", "FilterByLastComponent"])).map(function).filter(predicate)
+ options = dataset_ops.Options()
+ options.experimental_map_and_filter_fusion = True
+ dataset = dataset.with_options(options)
self._testMapAndFilter(dataset, function, predicate)
def _testMapAndFilter(self, dataset, function, predicate):
@@ -156,10 +160,11 @@ class MapAndFilterFusionTest(test_base.DatasetTestBase, parameterized.TestCase):
# We are currently not supporting functions with additional inputs.
dataset = dataset_ops.Dataset.range(10).apply(
- optimization.assert_next(
- ["Map", "Filter"])).map(function).filter(predicate).apply(
- optimization.optimize(["map_and_filter_fusion"]))
-
+ optimization.assert_next(["Map",
+ "Filter"])).map(function).filter(predicate)
+ options = dataset_ops.Options()
+ options.experimental_map_and_filter_fusion = True
+ dataset = dataset.with_options(options)
self._testMapAndFilter(dataset, function, predicate)
@staticmethod
@@ -197,8 +202,10 @@ class MapAndFilterFusionTest(test_base.DatasetTestBase, parameterized.TestCase):
for predicate in predicates:
dataset = dataset.filter(predicate)
- dataset = dataset.prefetch(0).apply(
- optimization.optimize(["filter_fusion"]))
+ dataset = dataset.prefetch(0)
+ options = dataset_ops.Options()
+ options.experimental_filter_fusion = True
+ dataset = dataset.with_options(options)
iterator = dataset.make_one_shot_iterator()
get_next = iterator.get_next()
with self.cached_session() as sess:
diff --git a/tensorflow/python/data/experimental/kernel_tests/optimization/map_parallelization_test.py b/tensorflow/python/data/experimental/kernel_tests/optimization/map_parallelization_test.py
index 334d8e3778..ce9c9bc47b 100644
--- a/tensorflow/python/data/experimental/kernel_tests/optimization/map_parallelization_test.py
+++ b/tensorflow/python/data/experimental/kernel_tests/optimization/map_parallelization_test.py
@@ -62,8 +62,10 @@ class MapParallelizationTest(test_base.DatasetTestBase, parameterized.TestCase):
def testMapParallelization(self, function, should_optimize):
next_nodes = ["ParallelMap"] if should_optimize else ["Map"]
dataset = dataset_ops.Dataset.range(5).apply(
- optimization.assert_next(next_nodes)).map(function).apply(
- optimization.optimize(["map_parallelization"]))
+ optimization.assert_next(next_nodes)).map(function)
+ options = dataset_ops.Options()
+ options.experimental_map_parallelization = True
+ dataset = dataset.with_options(options)
iterator = dataset.make_one_shot_iterator()
get_next = iterator.get_next()
diff --git a/tensorflow/python/data/experimental/kernel_tests/optimization/map_vectorization_test.py b/tensorflow/python/data/experimental/kernel_tests/optimization/map_vectorization_test.py
index d47492753e..32ebc49c40 100644
--- a/tensorflow/python/data/experimental/kernel_tests/optimization/map_vectorization_test.py
+++ b/tensorflow/python/data/experimental/kernel_tests/optimization/map_vectorization_test.py
@@ -69,10 +69,11 @@ class MapVectorizationTest(test_base.DatasetTestBase, parameterized.TestCase):
map_fn, num_parallel_calls=num_parallel_calls).batch(batch_size)
unoptimized = _make_dataset([map_node_name, "Batch"])
- optimized = _make_dataset(["Batch", map_node_name] if expect_optimized else
- [map_node_name, "Batch"]).apply(
- optimization.optimize(["map_vectorization"]))
-
+ optimized = _make_dataset(["Batch", map_node_name]
+ if expect_optimized else [map_node_name, "Batch"])
+ options = dataset_ops.Options()
+ options.experimental_map_vectorization = True
+ optimized = optimized.with_options(options)
return unoptimized, optimized
@parameterized.named_parameters(
@@ -179,7 +180,10 @@ class MapVectorizationBenchmark(test.Benchmark):
unoptimized = input_dataset.map(map_fn).batch(batch_size)
unoptimized_op = unoptimized.make_one_shot_iterator().get_next()
- optimized = unoptimized.apply(optimization.optimize(["map_vectorization"]))
+ optimized = input_dataset.map(map_fn).batch(batch_size)
+ options = dataset_ops.Options()
+ options.experimental_map_vectorization = True
+ optimized = optimized.with_options(options)
optimized_op = optimized.make_one_shot_iterator().get_next()
unoptimized_time = self._run(
diff --git a/tensorflow/python/data/experimental/kernel_tests/optimization/model_dataset_op_test.py b/tensorflow/python/data/experimental/kernel_tests/optimization/model_dataset_op_test.py
index a9f2ce8c03..82516356df 100644
--- a/tensorflow/python/data/experimental/kernel_tests/optimization/model_dataset_op_test.py
+++ b/tensorflow/python/data/experimental/kernel_tests/optimization/model_dataset_op_test.py
@@ -37,7 +37,9 @@ class ModelDatasetTest(test_base.DatasetTestBase):
np.random.rand(4 * k,
1))).repeat()
dataset = dataset.map(math_ops.matmul)
- iterator = dataset.apply(optimization.model()).make_one_shot_iterator()
+ options = dataset_ops.Options()
+ options.experimental_autotune = True
+ iterator = dataset.with_options(options).make_one_shot_iterator()
get_next = iterator.get_next()
deltas = []
@@ -61,7 +63,9 @@ class ModelDatasetTest(test_base.DatasetTestBase):
1))).repeat()
dataset = dataset.map(
math_ops.matmul, num_parallel_calls=optimization.AUTOTUNE)
- iterator = dataset.apply(optimization.model()).make_one_shot_iterator()
+ options = dataset_ops.Options()
+ options.experimental_autotune = True
+ iterator = dataset.with_options(options).make_one_shot_iterator()
get_next = iterator.get_next()
deltas = []
@@ -89,7 +93,9 @@ class ModelDatasetTest(test_base.DatasetTestBase):
math_ops.matmul,
num_parallel_calls=optimization.AUTOTUNE,
batch_size=batch_size))
- iterator = dataset.apply(optimization.model()).make_one_shot_iterator()
+ options = dataset_ops.Options()
+ options.experimental_autotune = True
+ iterator = dataset.with_options(options).make_one_shot_iterator()
get_next = iterator.get_next()
deltas = []
@@ -116,7 +122,9 @@ class ModelDatasetTest(test_base.DatasetTestBase):
lambda _: dataset,
cycle_length=10,
num_parallel_calls=optimization.AUTOTUNE)
- iterator = dataset.apply(optimization.model()).make_one_shot_iterator()
+ options = dataset_ops.Options()
+ options.experimental_autotune = True
+ iterator = dataset.with_options(options).make_one_shot_iterator()
get_next = iterator.get_next()
deltas = []
@@ -161,7 +169,9 @@ class ModelDatasetTest(test_base.DatasetTestBase):
lambda _: dataset, cycle_length=2)
dataset = dataset.map(f3, num_parallel_calls=optimization.AUTOTUNE)
- iterator = dataset.apply(optimization.model()).make_one_shot_iterator()
+ options = dataset_ops.Options()
+ options.experimental_autotune = True
+ iterator = dataset.with_options(options).make_one_shot_iterator()
get_next = iterator.get_next()
deltas = []
diff --git a/tensorflow/python/data/experimental/kernel_tests/optimization/noop_elimination_test.py b/tensorflow/python/data/experimental/kernel_tests/optimization/noop_elimination_test.py
index 092e0ff62a..fb0640fe9f 100644
--- a/tensorflow/python/data/experimental/kernel_tests/optimization/noop_elimination_test.py
+++ b/tensorflow/python/data/experimental/kernel_tests/optimization/noop_elimination_test.py
@@ -40,7 +40,9 @@ class NoopEliminationTest(test_base.DatasetTestBase):
["FiniteRepeat", "FiniteSkip", "Prefetch", "Prefetch"]))
dataset = dataset.repeat(some_tensor).skip(5).prefetch(0).take(-1).skip(
0).repeat(1).prefetch(0)
- dataset = dataset.apply(optimization.optimize(["noop_elimination"]))
+ options = dataset_ops.Options()
+ options.experimental_noop_elimination = True
+ dataset = dataset.with_options(options)
iterator = dataset.make_one_shot_iterator()
get_next = iterator.get_next()
diff --git a/tensorflow/python/data/experimental/kernel_tests/optimization/optimize_dataset_op_test.py b/tensorflow/python/data/experimental/kernel_tests/optimization/optimize_dataset_op_test.py
index eb661796c0..760cd8cc4e 100644
--- a/tensorflow/python/data/experimental/kernel_tests/optimization/optimize_dataset_op_test.py
+++ b/tensorflow/python/data/experimental/kernel_tests/optimization/optimize_dataset_op_test.py
@@ -33,23 +33,10 @@ class OptimizeDatasetTest(test_base.DatasetTestBase):
def testOptimizationDefault(self):
dataset = dataset_ops.Dataset.range(10).apply(
- optimization.assert_next(
- ["Map", "Batch"])).map(lambda x: x * x).batch(10).apply(
- optimization.optimize())
- iterator = dataset.make_one_shot_iterator()
- get_next = iterator.get_next()
-
- with self.cached_session() as sess:
- self.assertAllEqual([x * x for x in range(10)], sess.run(get_next))
- with self.assertRaises(errors.OutOfRangeError):
- sess.run(get_next)
-
- def testOptimizationEmpty(self):
- dataset = dataset_ops.Dataset.range(10).apply(
- optimization.assert_next(
- ["Map", "Batch"])).map(lambda x: x * x).batch(10).apply(
- optimization.optimize([]))
- iterator = dataset.make_one_shot_iterator()
+ optimization.assert_next(["Map",
+ "Batch"])).map(lambda x: x * x).batch(10)
+ iterator = dataset.with_options(
+ dataset_ops.Options()).make_one_shot_iterator()
get_next = iterator.get_next()
with self.cached_session() as sess:
@@ -60,8 +47,10 @@ class OptimizeDatasetTest(test_base.DatasetTestBase):
def testOptimizationFusion(self):
dataset = dataset_ops.Dataset.range(10).apply(
optimization.assert_next(
- ["MapAndBatch"])).map(lambda x: x * x).batch(10).apply(
- optimization.optimize(["map_and_batch_fusion"]))
+ ["MapAndBatch"])).map(lambda x: x * x).batch(10)
+ options = dataset_ops.Options()
+ options.experimental_map_and_batch_fusion = True
+ dataset = dataset.with_options(options)
iterator = dataset.make_one_shot_iterator()
get_next = iterator.get_next()
@@ -72,8 +61,10 @@ class OptimizeDatasetTest(test_base.DatasetTestBase):
def testOptimizationStatefulFunction(self):
dataset = dataset_ops.Dataset.range(10).map(
- lambda _: random_ops.random_uniform([])).batch(10).apply(
- optimization.optimize(["map_and_batch_fusion"]))
+ lambda _: random_ops.random_uniform([])).batch(10)
+ options = dataset_ops.Options()
+ options.experimental_map_and_batch_fusion = True
+ dataset = dataset.with_options(options)
iterator = dataset.make_one_shot_iterator()
get_next = iterator.get_next()
@@ -82,8 +73,10 @@ class OptimizeDatasetTest(test_base.DatasetTestBase):
def testOptimizationLargeInputFromTensor(self):
input_t = array_ops.placeholder(dtypes.int32, (None, None, None))
- dataset = dataset_ops.Dataset.from_tensors(input_t).apply(
- optimization.optimize())
+ dataset = dataset_ops.Dataset.from_tensors(input_t)
+ options = dataset_ops.Options()
+ options.experimental_map_and_batch_fusion = True
+ dataset = dataset.with_options(options)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
@@ -94,8 +87,10 @@ class OptimizeDatasetTest(test_base.DatasetTestBase):
def testOptimizationLargeInputFromTensorSlices(self):
input_t = array_ops.placeholder(dtypes.int32, (None, None, None, None))
- dataset = dataset_ops.Dataset.from_tensor_slices(input_t).apply(
- optimization.optimize())
+ dataset = dataset_ops.Dataset.from_tensor_slices(input_t)
+ options = dataset_ops.Options()
+ options.experimental_map_and_batch_fusion = True
+ dataset = dataset.with_options(options)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
diff --git a/tensorflow/python/data/experimental/ops/optimization.py b/tensorflow/python/data/experimental/ops/optimization.py
index 30348ede36..276dde8383 100644
--- a/tensorflow/python/data/experimental/ops/optimization.py
+++ b/tensorflow/python/data/experimental/ops/optimization.py
@@ -20,7 +20,6 @@ from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
-from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import gen_experimental_dataset_ops
# A constant that can be used to enable auto-tuning.
@@ -58,7 +57,7 @@ def model():
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
- return _ModelDataset(dataset)
+ return dataset_ops._ModelDataset(dataset) # pylint: disable=protected-access
return _apply_fn
@@ -78,7 +77,7 @@ def optimize(optimizations=None):
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
- return _OptimizeDataset(dataset, optimizations)
+ return dataset_ops._OptimizeDataset(dataset, optimizations) # pylint: disable=protected-access
return _apply_fn
@@ -113,59 +112,3 @@ class _AssertNextDataset(dataset_ops.UnaryDataset):
def output_types(self):
return self._input_dataset.output_types
-
-class _ModelDataset(dataset_ops.UnaryDataset):
- """A `Dataset` that acts as an identity, and models performance."""
-
- def __init__(self, input_dataset):
- """See `optimize()` for details."""
- super(_ModelDataset, self).__init__(input_dataset)
- self._input_dataset = input_dataset
-
- def _as_variant_tensor(self):
- return gen_dataset_ops.model_dataset(
- self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access
- **dataset_ops.flat_structure(self))
-
- @property
- def output_classes(self):
- return self._input_dataset.output_classes
-
- @property
- def output_shapes(self):
- return self._input_dataset.output_shapes
-
- @property
- def output_types(self):
- return self._input_dataset.output_types
-
-
-class _OptimizeDataset(dataset_ops.UnaryDataset):
- """A `Dataset` that acts as an identity, and applies optimizations."""
-
- def __init__(self, input_dataset, optimizations):
- """See `optimize()` for details."""
- super(_OptimizeDataset, self).__init__(input_dataset)
- self._input_dataset = input_dataset
- if optimizations is None:
- optimizations = []
- self._optimizations = ops.convert_to_tensor(
- optimizations, dtype=dtypes.string, name="optimizations")
-
- def _as_variant_tensor(self):
- return gen_dataset_ops.optimize_dataset(
- self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access
- self._optimizations,
- **dataset_ops.flat_structure(self))
-
- @property
- def output_classes(self):
- return self._input_dataset.output_classes
-
- @property
- def output_shapes(self):
- return self._input_dataset.output_shapes
-
- @property
- def output_types(self):
- return self._input_dataset.output_types
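With `_ModelDataset` and `_OptimizeDataset` moved into `dataset_ops` (see below), `optimization.model()` and `optimization.optimize()` become thin wrappers, and callers migrate to the declarative pattern used throughout the tests above. A before/after sketch, assuming the option names added in this change:

    # Before: request a graph rewrite explicitly via apply().
    dataset = dataset.apply(optimization.optimize(["map_and_batch_fusion"]))

    # After: declare the rewrite as a pipeline-wide option.
    options = dataset_ops.Options()
    options.experimental_map_and_batch_fusion = True
    dataset = dataset.with_options(options)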
diff --git a/tensorflow/python/data/kernel_tests/BUILD b/tensorflow/python/data/kernel_tests/BUILD
index cadfe7f9e0..bf76860aa4 100644
--- a/tensorflow/python/data/kernel_tests/BUILD
+++ b/tensorflow/python/data/kernel_tests/BUILD
@@ -115,8 +115,10 @@ tf_py_test(
srcs = ["dataset_ops_test.py"],
additional_deps = [
":test_base",
- "//tensorflow/core:protos_all_py",
+ "@absl_py//absl/testing:parameterized",
+ "//third_party/py/numpy",
"//tensorflow/python:client_testlib",
+ "//tensorflow/python:sparse_tensor",
"//tensorflow/python/data/ops:dataset_ops",
],
)
@@ -173,20 +175,6 @@ tf_py_test(
)
tf_py_test(
- name = "inputs_test",
- size = "small",
- srcs = ["inputs_test.py"],
- additional_deps = [
- ":test_base",
- "@absl_py//absl/testing:parameterized",
- "//third_party/py/numpy",
- "//tensorflow/python:client_testlib",
- "//tensorflow/python:sparse_tensor",
- "//tensorflow/python/data/ops:dataset_ops",
- ],
-)
-
-tf_py_test(
name = "interleave_dataset_op_test",
size = "small",
srcs = ["interleave_dataset_op_test.py"],
diff --git a/tensorflow/python/data/kernel_tests/dataset_ops_test.py b/tensorflow/python/data/kernel_tests/dataset_ops_test.py
index f115f9d9c7..b9f8875b9f 100644
--- a/tensorflow/python/data/kernel_tests/dataset_ops_test.py
+++ b/tensorflow/python/data/kernel_tests/dataset_ops_test.py
@@ -18,13 +18,20 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+from absl.testing import parameterized
+import numpy as np
+
from tensorflow.core.framework import graph_pb2
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
+from tensorflow.python.data.ops import readers
+from tensorflow.python.data.util import nest
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import sparse_tensor
from tensorflow.python.platform import test
-class DatasetOpsTest(test_base.DatasetTestBase):
+class DatasetOpsTest(test_base.DatasetTestBase, parameterized.TestCase):
def testAsSerializedGraph(self):
dataset = dataset_ops.Dataset.range(10)
@@ -33,6 +40,155 @@ class DatasetOpsTest(test_base.DatasetTestBase):
sess.run(dataset._as_serialized_graph()))
self.assertTrue(any([node.op != "RangeDataset" for node in graph.node]))
+ @staticmethod
+ def make_apply_fn(dataset):
+
+ def apply_fn(dataset):
+
+ def _apply_fn(dataset):
+ return dataset.cache()
+
+ return dataset.apply(_apply_fn)
+
+ return apply_fn
+
+ @staticmethod
+ def make_gen():
+
+ def gen():
+ yield 42
+
+ return gen
+
+ @staticmethod
+ def make_interleave_fn(dataset, num_parallel_calls=None):
+
+ def interleave_fn(dataset):
+ return dataset.interleave(
+ lambda x: dataset_ops.Dataset.range(0),
+ cycle_length=2,
+ num_parallel_calls=num_parallel_calls)
+
+ return interleave_fn
+
+ @parameterized.named_parameters(
+ ("FixedLengthRecord", readers.FixedLengthRecordDataset("", 42)),
+ ("FromGenerator",
+ dataset_ops.Dataset.from_generator(make_gen.__func__(), dtypes.int32),
+ 1),
+ ("FromSparseTensorSlices",
+ dataset_ops.Dataset.from_sparse_tensor_slices(
+ sparse_tensor.SparseTensor(
+ indices=np.array([[0, 0], [1, 0], [2, 0]]),
+ values=np.array([0, 0, 0]),
+ dense_shape=np.array([3, 1])))),
+ ("FromTensors", dataset_ops.Dataset.from_tensors([42])),
+ ("FromTensorSlices", dataset_ops.Dataset.from_tensors([42])),
+ ("Range", dataset_ops.Dataset.range(10)),
+ ("TextLine", readers.TextLineDataset("")),
+ ("TFRecord", readers.TFRecordDataset(""), 1),
+ )
+ def testDatasetSourceInputs(self, dataset, num_inputs=0):
+ self.assertEqual(num_inputs, len(dataset._inputs()))
+
+ @parameterized.named_parameters(
+ ("Apply", make_apply_fn.__func__(dataset_ops.Dataset.range(0)),
+ dataset_ops.Dataset.range(0)),
+ ("Batch", lambda x: x.batch(10), dataset_ops.Dataset.range(0)),
+ ("Cache", lambda x: x.cache(), dataset_ops.Dataset.range(0)),
+ ("Filter", lambda x: x.filter(lambda x: True),
+ dataset_ops.Dataset.range(0)),
+ ("FlatMap", lambda x: x.flat_map(lambda x: dataset_ops.Dataset.range(0)),
+ dataset_ops.Dataset.range(0)),
+ ("Interleave", make_interleave_fn.__func__(dataset_ops.Dataset.range(0)),
+ dataset_ops.Dataset.range(0)),
+ ("Map", lambda x: x.map(lambda x: x), dataset_ops.Dataset.range(0)),
+ ("PaddedBatch", lambda x: x.padded_batch(10, []),
+ dataset_ops.Dataset.range(0)),
+ ("ParallelInterleave",
+ make_interleave_fn.__func__(dataset_ops.Dataset.range(0), 2),
+ dataset_ops.Dataset.range(0)),
+ ("ParallelMap", lambda x: x.map(lambda x: x, num_parallel_calls=2),
+ dataset_ops.Dataset.range(0)),
+ ("Repeat", lambda x: x.repeat(), dataset_ops.Dataset.range(0)),
+ ("Shuffle", lambda x: x.shuffle(10), dataset_ops.Dataset.range(0)),
+ ("Skip", lambda x: x.skip(1), dataset_ops.Dataset.range(0)),
+ ("Take", lambda x: x.take(1), dataset_ops.Dataset.range(0)),
+ ("Window", lambda x: x.window(10), dataset_ops.Dataset.range(0)),
+ )
+ def testUnaryTransformationInputs(self, dataset_fn, input_dataset):
+ self.assertEqual([input_dataset], dataset_fn(input_dataset)._inputs())
+
+ @parameterized.named_parameters(
+ ("Concatenate", lambda x, y: x.concatenate(y),
+ dataset_ops.Dataset.range(0), dataset_ops.Dataset.range(1)))
+ def testBinaryTransformationInputs(self, dataset_fn, input1, input2):
+ self.assertEqual([input1, input2], dataset_fn(input1, input2)._inputs())
+
+ @parameterized.named_parameters(
+ ("ZipOne", dataset_ops.Dataset.zip, (dataset_ops.Dataset.range(0))),
+ ("ZipNest", dataset_ops.Dataset.zip,
+ (dataset_ops.Dataset.range(0),
+ (dataset_ops.Dataset.range(1), dataset_ops.Dataset.range(2)))),
+ ("ZipTuple", dataset_ops.Dataset.zip,
+ (dataset_ops.Dataset.range(0), dataset_ops.Dataset.range(1))))
+ def testVariadicTransformationInputs(self, dataset_fn, input_datasets):
+ self.assertEqual(
+ nest.flatten(input_datasets),
+ dataset_fn(input_datasets)._inputs())
+
+ def testCollectInputs(self):
+ ds1 = dataset_ops.Dataset.range(0)
+ ds2 = ds1.concatenate(ds1)
+ ds3 = dataset_ops.Dataset.zip((ds2, ds1, ds2))
+
+ inputs = []
+ queue = [ds3]
+ while queue:
+ ds = queue[0]
+ queue = queue[1:]
+ queue.extend(ds._inputs())
+ inputs.append(ds)
+
+ self.assertEqual(5, inputs.count(ds1))
+ self.assertEqual(2, inputs.count(ds2))
+ self.assertEqual(1, inputs.count(ds3))
+
+ def testOptionsDefault(self):
+ ds = dataset_ops.Dataset.range(0)
+ self.assertEqual(dataset_ops.Options(), ds.options())
+
+ def testOptionsOnce(self):
+ options = dataset_ops.Options()
+ ds = dataset_ops.Dataset.range(0).with_options(options).cache()
+ self.assertEqual(options, ds.options())
+
+ def testOptionsTwiceSame(self):
+ options = dataset_ops.Options()
+ options.experimental_autotune = True
+ ds = dataset_ops.Dataset.range(0).with_options(options).with_options(
+ options)
+ self.assertEqual(options, ds.options())
+
+ def testOptionsTwiceDifferent(self):
+ options1 = dataset_ops.Options()
+ options1.experimental_autotune = True
+ options2 = dataset_ops.Options()
+ options2.experimental_filter_fusion = False
+ ds = dataset_ops.Dataset.range(0).with_options(options1).with_options(
+ options2)
+ self.assertTrue(ds.options().experimental_autotune)
+ self.assertFalse(ds.options().experimental_filter_fusion)
+
+ def testOptionsTwiceDifferentError(self):
+ options1 = dataset_ops.Options()
+ options1.experimental_autotune = True
+ options2 = dataset_ops.Options()
+ options2.experimental_autotune = False
+ with self.assertRaisesRegexp(ValueError,
+ "Cannot merge incompatible values of option"):
+ dataset_ops.Dataset.range(0).with_options(options1).with_options(options2)
+
if __name__ == "__main__":
test.main()
diff --git a/tensorflow/python/data/ops/dataset_ops.py b/tensorflow/python/data/ops/dataset_ops.py
index 3b9d3a639d..46ce191f7b 100644
--- a/tensorflow/python/data/ops/dataset_ops.py
+++ b/tensorflow/python/data/ops/dataset_ops.py
@@ -86,6 +86,18 @@ class Dataset(object):
raise NotImplementedError("Dataset._inputs")
+ def options(self):
+ """Returns the options for this dataset.
+
+ Returns:
+ A `tf.data.Options` object representing the dataset options.
+ """
+ for input_dataset in self._inputs():
+ options = input_dataset.options()
+ if options is not None:
+ return options
+ return Options()
+
def make_initializable_iterator(self, shared_name=None):
"""Creates an `Iterator` for enumerating the elements of this dataset.
@@ -114,6 +126,13 @@ class Dataset(object):
raise RuntimeError(
"dataset.make_initializable_iterator is not supported when eager "
"execution is enabled.")
+ dataset = self
+ options = self.options()
+ static_optimizations = options._static_optimizations() # pylint: disable=protected-access
+ if static_optimizations:
+ dataset = _OptimizeDataset(dataset, static_optimizations)
+ if options.experimental_autotune:
+ dataset = _ModelDataset(dataset)
if shared_name is None:
shared_name = ""
if compat.forward_compatible(2018, 8, 3):
@@ -123,11 +142,12 @@ class Dataset(object):
iterator_resource = gen_dataset_ops.iterator(
container="", shared_name=shared_name, **flat_structure(self))
with ops.colocate_with(iterator_resource):
- initializer = gen_dataset_ops.make_iterator(self._as_variant_tensor(),
- iterator_resource)
+ initializer = gen_dataset_ops.make_iterator(
+ dataset._as_variant_tensor(), # pylint: disable=protected-access
+ iterator_resource)
return iterator_ops.Iterator(iterator_resource, initializer,
- self.output_types, self.output_shapes,
- self.output_classes)
+ dataset.output_types, dataset.output_shapes,
+ dataset.output_classes)
def __iter__(self):
"""Creates an `Iterator` for enumerating the elements of this dataset.
@@ -162,7 +182,14 @@ class Dataset(object):
# a 0-argument function.
@function.Defun(capture_by_value=True)
def _make_dataset():
- return self._as_variant_tensor() # pylint: disable=protected-access
+ dataset = self
+ options = self.options()
+ static_optimizations = options._static_optimizations() # pylint: disable=protected-access
+ if static_optimizations:
+ dataset = _OptimizeDataset(dataset, static_optimizations)
+ if options.experimental_autotune:
+ dataset = _ModelDataset(dataset)
+ return dataset._as_variant_tensor() # pylint: disable=protected-access
try:
_make_dataset.add_to_graph(ops.get_default_graph())
@@ -1325,6 +1352,146 @@ class Dataset(object):
output_shapes,
output_classes)
+ def with_options(self, options):
+ """Returns a new `tf.data.Dataset` with the given options set.
+
+ The options are "global" in the sense they apply to the entire input
+ pipeline in which the `with_options` transformation is used. If options are
+ set multiple times, they are merged if possible (see
+ `tf.data.Options.merge()` for details).
+
+ Args:
+      options: A `tf.data.Options` that identifies the options to use.
+
+ Returns:
+ Dataset: A `Dataset` with the given options.
+
+ Raises:
+      ValueError: if options are set more than once with conflicting values
+ """
+ return _OptionsDataset(self, options)
+
+
+@tf_export("data.Options")
+class Options(object):
+ """Represents options for tf.data.Dataset.
+
+  An `Options` object can be used, for instance, to control which static
+ optimizations to apply or whether to use performance modeling to dynamically
+ tune the parallelism of operations such as `tf.data.Dataset.map` or
+ `tf.data.Dataset.interleave`.
+ """
+ for _name, _ty, _docstring in [
+ ("experimental_autotune", bool,
+ "Whether to dynamically adjust the values of tunable parameters (e.g. "
+ "degrees of parallelism)."),
+ ("experimental_filter_fusion", bool,
+ "Whether to fuse filter transformations."),
+ ("experimental_hoist_random_uniform", bool,
+ "Whether to hoist `tf.random_uniform()` ops out of map transformations."
+ ),
+ ("experimental_latency_all_edges", bool,
+ "Whether to add latency measurements on all edges."),
+ ("experimental_map_and_batch_fusion", bool,
+ "Whether to fuse map and batch transformations."),
+ ("experimental_map_and_filter_fusion", bool,
+ "Whether to fuse map and filter transformations."),
+ ("experimental_map_fusion", bool, "Whether to fuse map transformations."),
+ ("experimental_map_parallelization", bool,
+ "Whether to parallelize stateless map transformations."),
+ ("experimental_map_vectorization", bool,
+ "Whether to vectorize map transformations."),
+ ("experimental_noop_elimination", bool,
+ "Whether to eliminate no-op transformations."),
+ ("experimental_shuffle_and_repeat_fusion", bool,
+ "Whether to fuse shuffle and repeat transformations."),
+ ]:
+
+ def _make_getter(name): # pylint: disable=no-self-argument
+
+ def getter(self):
+ return getattr(self, "_" + name)
+
+ return getter
+
+ def _make_setter(name, ty): # pylint: disable=no-self-argument
+
+ def setter(self, value):
+ if not isinstance(value, ty):
+ raise TypeError(
+ "Attempting to set the option %s to incompatible value: %r" %
+ (name, value))
+ setattr(self, "_" + name, value)
+
+ return setter
+
+ vars()["_" + _name] = None
+ vars()[_name] = property(
+ _make_getter(_name), _make_setter(_name, _ty), None, _docstring)
+
+ def __init__(self):
+ pass
+
+ def __eq__(self, other):
+ if isinstance(other, self.__class__):
+ return self.__dict__ == other.__dict__
+ else:
+ return False
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def _static_optimizations(self):
+ """Produces the list of enabled static optimizations."""
+ experimental_optimizations = [
+ "filter_fusion", "hoist_random_uniform", "latency_all_edges",
+ "map_and_batch_fusion", "map_and_filter_fusion", "map_fusion",
+ "map_parallelization", "map_vectorization", "noop_elimination",
+ "shuffle_and_repeat_fusion"
+ ]
+ result = []
+ for exp_opt in experimental_optimizations:
+ if getattr(self, "experimental_" + exp_opt):
+ result.append(exp_opt)
+ return result
+
+ def merge(self, options):
+ """Merges itself with the given `tf.data.Options`.
+
+ The given `tf.data.Options` can be merged as long as there does not exist an
+ attribute that is set to different values in `self` and `options`.
+
+ Args:
+ options: a `tf.data.Options` to merge with
+
+ Raises:
+ ValueError: if the given `tf.data.Options` cannot be merged
+
+ Returns:
+ New `tf.data.Options()` object which is the result of merging self with
+ the input `tf.data.Options`.
+ """
+ result = Options()
+ for other in [self, options]:
+ for name in [
+ "experimental_autotune", "experimental_filter_fusion",
+ "experimental_hoist_random_uniform", "experimental_latency_all_edges",
+ "experimental_map_and_batch_fusion",
+ "experimental_map_and_filter_fusion", "experimental_map_fusion",
+ "experimental_map_parallelization", "experimental_map_vectorization",
+ "experimental_noop_elimination",
+ "experimental_shuffle_and_repeat_fusion"
+ ]:
+ this = getattr(result, name)
+ that = getattr(other, name)
+ if that is not None:
+ if this is None:
+ setattr(result, name, that)
+ elif this != that:
+ raise ValueError(
+ "Cannot merge incompatible values of option: %s" % (name))
+ return result
+
class DatasetSource(Dataset):
"""Abstract class representing a dataset with no inputs."""
@@ -1664,6 +1831,9 @@ class StructuredFunctionWrapper(object):
flat_classes.append(component)
flat_shapes.append(component)
flat_types.append(component)
+ if t.options() is not None: # pylint: disable=protected-access
+ warnings.warn("Encountered a nested dataset with options. These "
+ "options will not be applied to the outer dataset.")
else:
try:
t = ops.convert_to_tensor(t)
@@ -2703,3 +2873,91 @@ class WindowDataset(UnaryDataset):
@property
def output_types(self):
return self._output_types
+
+
+class _OptionsDataset(UnaryDataset):
+ """An identity `Dataset` that stores options."""
+
+ def __init__(self, input_dataset, options):
+ super(_OptionsDataset, self).__init__(input_dataset)
+ self._input_dataset = input_dataset
+ self._options = input_dataset.options()
+ if self._options:
+ self._options = self._options.merge(options)
+ else:
+ self._options = options
+
+ def _as_variant_tensor(self):
+ return self._input_dataset._as_variant_tensor() # pylint: disable=protected-access
+
+ def options(self):
+ return self._options
+
+ @property
+ def output_classes(self):
+ return self._input_dataset.output_classes
+
+ @property
+ def output_shapes(self):
+ return self._input_dataset.output_shapes
+
+ @property
+ def output_types(self):
+ return self._input_dataset.output_types
+
+
+class _ModelDataset(UnaryDataset):
+ """A `Dataset` that acts as an identity, and models performance."""
+
+ def __init__(self, input_dataset):
+ """See `optimize()` for details."""
+ super(_ModelDataset, self).__init__(input_dataset)
+ self._input_dataset = input_dataset
+
+ def _as_variant_tensor(self):
+ return gen_dataset_ops.model_dataset(
+ self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access
+ **flat_structure(self))
+
+ @property
+ def output_classes(self):
+ return self._input_dataset.output_classes
+
+ @property
+ def output_shapes(self):
+ return self._input_dataset.output_shapes
+
+ @property
+ def output_types(self):
+ return self._input_dataset.output_types
+
+
+class _OptimizeDataset(UnaryDataset):
+ """A `Dataset` that acts as an identity, and applies optimizations."""
+
+ def __init__(self, input_dataset, optimizations):
+ """See `optimize()` for details."""
+ super(_OptimizeDataset, self).__init__(input_dataset)
+ self._input_dataset = input_dataset
+ if optimizations is None:
+ optimizations = []
+ self._optimizations = ops.convert_to_tensor(
+ optimizations, dtype=dtypes.string, name="optimizations")
+
+ def _as_variant_tensor(self):
+ return gen_dataset_ops.optimize_dataset(
+ self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access
+ self._optimizations,
+ **flat_structure(self))
+
+ @property
+ def output_classes(self):
+ return self._input_dataset.output_classes
+
+ @property
+ def output_shapes(self):
+ return self._input_dataset.output_shapes
+
+ @property
+ def output_types(self):
+ return self._input_dataset.output_types
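The merge semantics exercised by `testOptionsTwiceDifferent` and `testOptionsTwiceDifferentError` above reduce to: an option set in only one of the two objects carries over, while two explicit, conflicting values raise. A short sketch:

    options1 = dataset_ops.Options()
    options1.experimental_autotune = True
    options2 = dataset_ops.Options()
    options2.experimental_filter_fusion = False

    merged = options1.merge(options2)  # disjoint settings merge cleanly
    assert merged.experimental_autotune is True
    assert merged.experimental_filter_fusion is False

    options3 = dataset_ops.Options()
    options3.experimental_autotune = False
    options1.merge(options3)  # ValueError: Cannot merge incompatible values of option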
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.data.-dataset.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.data.-dataset.pbtxt
index 825afb622f..8b7f63e43e 100644
--- a/tensorflow/tools/api/golden/v1/tensorflow.data.-dataset.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.data.-dataset.pbtxt
@@ -79,6 +79,10 @@ tf_class {
argspec: "args=[\'self\', \'map_func\', \'num_parallel_calls\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
+ name: "options"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "padded_batch"
argspec: "args=[\'self\', \'batch_size\', \'padded_shapes\', \'padding_values\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
}
@@ -119,6 +123,10 @@ tf_class {
argspec: "args=[\'self\', \'size\', \'shift\', \'stride\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'False\'], "
}
member_method {
+ name: "with_options"
+ argspec: "args=[\'self\', \'options\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "zip"
argspec: "args=[\'datasets\'], varargs=None, keywords=None, defaults=None"
}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.data.-fixed-length-record-dataset.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.data.-fixed-length-record-dataset.pbtxt
index cdad5f6360..a7bfa82c65 100644
--- a/tensorflow/tools/api/golden/v1/tensorflow.data.-fixed-length-record-dataset.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.data.-fixed-length-record-dataset.pbtxt
@@ -80,6 +80,10 @@ tf_class {
argspec: "args=[\'self\', \'map_func\', \'num_parallel_calls\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
+ name: "options"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "padded_batch"
argspec: "args=[\'self\', \'batch_size\', \'padded_shapes\', \'padding_values\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
}
@@ -120,6 +124,10 @@ tf_class {
argspec: "args=[\'self\', \'size\', \'shift\', \'stride\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'False\'], "
}
member_method {
+ name: "with_options"
+ argspec: "args=[\'self\', \'options\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "zip"
argspec: "args=[\'datasets\'], varargs=None, keywords=None, defaults=None"
}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.data.-options.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.data.-options.pbtxt
new file mode 100644
index 0000000000..d15dccc173
--- /dev/null
+++ b/tensorflow/tools/api/golden/v1/tensorflow.data.-options.pbtxt
@@ -0,0 +1,57 @@
+path: "tensorflow.data.Options"
+tf_class {
+ is_instance: "<class \'tensorflow.python.data.ops.dataset_ops.Options\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "experimental_autotune"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "experimental_filter_fusion"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "experimental_hoist_random_uniform"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "experimental_latency_all_edges"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "experimental_map_and_batch_fusion"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "experimental_map_and_filter_fusion"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "experimental_map_fusion"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "experimental_map_parallelization"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "experimental_map_vectorization"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "experimental_noop_elimination"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "experimental_shuffle_and_repeat_fusion"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "merge"
+ argspec: "args=[\'self\', \'options\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.data.-t-f-record-dataset.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.data.-t-f-record-dataset.pbtxt
index df41bff1b5..7b7a9ebaf0 100644
--- a/tensorflow/tools/api/golden/v1/tensorflow.data.-t-f-record-dataset.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.data.-t-f-record-dataset.pbtxt
@@ -80,6 +80,10 @@ tf_class {
argspec: "args=[\'self\', \'map_func\', \'num_parallel_calls\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
+ name: "options"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "padded_batch"
argspec: "args=[\'self\', \'batch_size\', \'padded_shapes\', \'padding_values\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
}
@@ -120,6 +124,10 @@ tf_class {
argspec: "args=[\'self\', \'size\', \'shift\', \'stride\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'False\'], "
}
member_method {
+ name: "with_options"
+ argspec: "args=[\'self\', \'options\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "zip"
argspec: "args=[\'datasets\'], varargs=None, keywords=None, defaults=None"
}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.data.-text-line-dataset.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.data.-text-line-dataset.pbtxt
index 028bcc2ce9..2817f900e1 100644
--- a/tensorflow/tools/api/golden/v1/tensorflow.data.-text-line-dataset.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.data.-text-line-dataset.pbtxt
@@ -80,6 +80,10 @@ tf_class {
argspec: "args=[\'self\', \'map_func\', \'num_parallel_calls\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
+ name: "options"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "padded_batch"
argspec: "args=[\'self\', \'batch_size\', \'padded_shapes\', \'padding_values\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
}
@@ -120,6 +124,10 @@ tf_class {
argspec: "args=[\'self\', \'size\', \'shift\', \'stride\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'False\'], "
}
member_method {
+ name: "with_options"
+ argspec: "args=[\'self\', \'options\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "zip"
argspec: "args=[\'datasets\'], varargs=None, keywords=None, defaults=None"
}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.data.experimental.-csv-dataset.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.data.experimental.-csv-dataset.pbtxt
index 0c0405ee02..2520e28a3c 100644
--- a/tensorflow/tools/api/golden/v1/tensorflow.data.experimental.-csv-dataset.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.data.experimental.-csv-dataset.pbtxt
@@ -81,6 +81,10 @@ tf_class {
argspec: "args=[\'self\', \'map_func\', \'num_parallel_calls\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
+ name: "options"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "padded_batch"
argspec: "args=[\'self\', \'batch_size\', \'padded_shapes\', \'padding_values\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
}
@@ -121,6 +125,10 @@ tf_class {
argspec: "args=[\'self\', \'size\', \'shift\', \'stride\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'False\'], "
}
member_method {
+ name: "with_options"
+ argspec: "args=[\'self\', \'options\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "zip"
argspec: "args=[\'datasets\'], varargs=None, keywords=None, defaults=None"
}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.data.experimental.-random-dataset.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.data.experimental.-random-dataset.pbtxt
index bce0be4b17..1dd53b1eab 100644
--- a/tensorflow/tools/api/golden/v1/tensorflow.data.experimental.-random-dataset.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.data.experimental.-random-dataset.pbtxt
@@ -81,6 +81,10 @@ tf_class {
argspec: "args=[\'self\', \'map_func\', \'num_parallel_calls\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
+ name: "options"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "padded_batch"
argspec: "args=[\'self\', \'batch_size\', \'padded_shapes\', \'padding_values\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
}
@@ -121,6 +125,10 @@ tf_class {
argspec: "args=[\'self\', \'size\', \'shift\', \'stride\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'False\'], "
}
member_method {
+ name: "with_options"
+ argspec: "args=[\'self\', \'options\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "zip"
argspec: "args=[\'datasets\'], varargs=None, keywords=None, defaults=None"
}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.data.experimental.-sql-dataset.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.data.experimental.-sql-dataset.pbtxt
index 8aeae92d96..8fdd9dc52e 100644
--- a/tensorflow/tools/api/golden/v1/tensorflow.data.experimental.-sql-dataset.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.data.experimental.-sql-dataset.pbtxt
@@ -81,6 +81,10 @@ tf_class {
argspec: "args=[\'self\', \'map_func\', \'num_parallel_calls\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
+ name: "options"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "padded_batch"
argspec: "args=[\'self\', \'batch_size\', \'padded_shapes\', \'padding_values\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
}
@@ -121,6 +125,10 @@ tf_class {
argspec: "args=[\'self\', \'size\', \'shift\', \'stride\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'False\'], "
}
member_method {
+ name: "with_options"
+ argspec: "args=[\'self\', \'options\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "zip"
argspec: "args=[\'datasets\'], varargs=None, keywords=None, defaults=None"
}
diff --git a/tensorflow/tools/api/golden/v1/tensorflow.data.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.data.pbtxt
index e205157523..3023276a1d 100644
--- a/tensorflow/tools/api/golden/v1/tensorflow.data.pbtxt
+++ b/tensorflow/tools/api/golden/v1/tensorflow.data.pbtxt
@@ -13,6 +13,10 @@ tf_module {
mtype: "<type \'type\'>"
}
member {
+ name: "Options"
+ mtype: "<type \'type\'>"
+ }
+ member {
name: "TFRecordDataset"
mtype: "<class \'abc.ABCMeta\'>"
}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.data.-dataset.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.data.-dataset.pbtxt
index 825afb622f..8b7f63e43e 100644
--- a/tensorflow/tools/api/golden/v2/tensorflow.data.-dataset.pbtxt
+++ b/tensorflow/tools/api/golden/v2/tensorflow.data.-dataset.pbtxt
@@ -79,6 +79,10 @@ tf_class {
argspec: "args=[\'self\', \'map_func\', \'num_parallel_calls\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
+ name: "options"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "padded_batch"
argspec: "args=[\'self\', \'batch_size\', \'padded_shapes\', \'padding_values\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
}
@@ -119,6 +123,10 @@ tf_class {
argspec: "args=[\'self\', \'size\', \'shift\', \'stride\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'False\'], "
}
member_method {
+ name: "with_options"
+ argspec: "args=[\'self\', \'options\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "zip"
argspec: "args=[\'datasets\'], varargs=None, keywords=None, defaults=None"
}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.data.-fixed-length-record-dataset.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.data.-fixed-length-record-dataset.pbtxt
index cdad5f6360..a7bfa82c65 100644
--- a/tensorflow/tools/api/golden/v2/tensorflow.data.-fixed-length-record-dataset.pbtxt
+++ b/tensorflow/tools/api/golden/v2/tensorflow.data.-fixed-length-record-dataset.pbtxt
@@ -80,6 +80,10 @@ tf_class {
argspec: "args=[\'self\', \'map_func\', \'num_parallel_calls\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
+ name: "options"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "padded_batch"
argspec: "args=[\'self\', \'batch_size\', \'padded_shapes\', \'padding_values\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
}
@@ -120,6 +124,10 @@ tf_class {
argspec: "args=[\'self\', \'size\', \'shift\', \'stride\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'False\'], "
}
member_method {
+ name: "with_options"
+ argspec: "args=[\'self\', \'options\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "zip"
argspec: "args=[\'datasets\'], varargs=None, keywords=None, defaults=None"
}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.data.-options.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.data.-options.pbtxt
new file mode 100644
index 0000000000..d15dccc173
--- /dev/null
+++ b/tensorflow/tools/api/golden/v2/tensorflow.data.-options.pbtxt
@@ -0,0 +1,57 @@
+path: "tensorflow.data.Options"
+tf_class {
+ is_instance: "<class \'tensorflow.python.data.ops.dataset_ops.Options\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "experimental_autotune"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "experimental_filter_fusion"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "experimental_hoist_random_uniform"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "experimental_latency_all_edges"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "experimental_map_and_batch_fusion"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "experimental_map_and_filter_fusion"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "experimental_map_fusion"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "experimental_map_parallelization"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "experimental_map_vectorization"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "experimental_noop_elimination"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "experimental_shuffle_and_repeat_fusion"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "merge"
+ argspec: "args=[\'self\', \'options\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.data.-t-f-record-dataset.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.data.-t-f-record-dataset.pbtxt
index df41bff1b5..7b7a9ebaf0 100644
--- a/tensorflow/tools/api/golden/v2/tensorflow.data.-t-f-record-dataset.pbtxt
+++ b/tensorflow/tools/api/golden/v2/tensorflow.data.-t-f-record-dataset.pbtxt
@@ -80,6 +80,10 @@ tf_class {
argspec: "args=[\'self\', \'map_func\', \'num_parallel_calls\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
+ name: "options"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "padded_batch"
argspec: "args=[\'self\', \'batch_size\', \'padded_shapes\', \'padding_values\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
}
@@ -120,6 +124,10 @@ tf_class {
argspec: "args=[\'self\', \'size\', \'shift\', \'stride\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'False\'], "
}
member_method {
+ name: "with_options"
+ argspec: "args=[\'self\', \'options\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "zip"
argspec: "args=[\'datasets\'], varargs=None, keywords=None, defaults=None"
}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.data.-text-line-dataset.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.data.-text-line-dataset.pbtxt
index 028bcc2ce9..2817f900e1 100644
--- a/tensorflow/tools/api/golden/v2/tensorflow.data.-text-line-dataset.pbtxt
+++ b/tensorflow/tools/api/golden/v2/tensorflow.data.-text-line-dataset.pbtxt
@@ -80,6 +80,10 @@ tf_class {
argspec: "args=[\'self\', \'map_func\', \'num_parallel_calls\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
+ name: "options"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "padded_batch"
argspec: "args=[\'self\', \'batch_size\', \'padded_shapes\', \'padding_values\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
}
@@ -120,6 +124,10 @@ tf_class {
argspec: "args=[\'self\', \'size\', \'shift\', \'stride\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'False\'], "
}
member_method {
+ name: "with_options"
+ argspec: "args=[\'self\', \'options\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "zip"
argspec: "args=[\'datasets\'], varargs=None, keywords=None, defaults=None"
}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.data.experimental.-csv-dataset.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.data.experimental.-csv-dataset.pbtxt
index 0c0405ee02..2520e28a3c 100644
--- a/tensorflow/tools/api/golden/v2/tensorflow.data.experimental.-csv-dataset.pbtxt
+++ b/tensorflow/tools/api/golden/v2/tensorflow.data.experimental.-csv-dataset.pbtxt
@@ -81,6 +81,10 @@ tf_class {
argspec: "args=[\'self\', \'map_func\', \'num_parallel_calls\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
+ name: "options"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "padded_batch"
argspec: "args=[\'self\', \'batch_size\', \'padded_shapes\', \'padding_values\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
}
@@ -121,6 +125,10 @@ tf_class {
argspec: "args=[\'self\', \'size\', \'shift\', \'stride\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'False\'], "
}
member_method {
+ name: "with_options"
+ argspec: "args=[\'self\', \'options\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "zip"
argspec: "args=[\'datasets\'], varargs=None, keywords=None, defaults=None"
}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.data.experimental.-random-dataset.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.data.experimental.-random-dataset.pbtxt
index bce0be4b17..1dd53b1eab 100644
--- a/tensorflow/tools/api/golden/v2/tensorflow.data.experimental.-random-dataset.pbtxt
+++ b/tensorflow/tools/api/golden/v2/tensorflow.data.experimental.-random-dataset.pbtxt
@@ -81,6 +81,10 @@ tf_class {
argspec: "args=[\'self\', \'map_func\', \'num_parallel_calls\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
+ name: "options"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "padded_batch"
argspec: "args=[\'self\', \'batch_size\', \'padded_shapes\', \'padding_values\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
}
@@ -121,6 +125,10 @@ tf_class {
argspec: "args=[\'self\', \'size\', \'shift\', \'stride\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'False\'], "
}
member_method {
+ name: "with_options"
+ argspec: "args=[\'self\', \'options\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "zip"
argspec: "args=[\'datasets\'], varargs=None, keywords=None, defaults=None"
}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.data.experimental.-sql-dataset.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.data.experimental.-sql-dataset.pbtxt
index 8aeae92d96..8fdd9dc52e 100644
--- a/tensorflow/tools/api/golden/v2/tensorflow.data.experimental.-sql-dataset.pbtxt
+++ b/tensorflow/tools/api/golden/v2/tensorflow.data.experimental.-sql-dataset.pbtxt
@@ -81,6 +81,10 @@ tf_class {
argspec: "args=[\'self\', \'map_func\', \'num_parallel_calls\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
+ name: "options"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "padded_batch"
argspec: "args=[\'self\', \'batch_size\', \'padded_shapes\', \'padding_values\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'False\'], "
}
@@ -121,6 +125,10 @@ tf_class {
argspec: "args=[\'self\', \'size\', \'shift\', \'stride\', \'drop_remainder\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'False\'], "
}
member_method {
+ name: "with_options"
+ argspec: "args=[\'self\', \'options\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "zip"
argspec: "args=[\'datasets\'], varargs=None, keywords=None, defaults=None"
}
diff --git a/tensorflow/tools/api/golden/v2/tensorflow.data.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.data.pbtxt
index e205157523..3023276a1d 100644
--- a/tensorflow/tools/api/golden/v2/tensorflow.data.pbtxt
+++ b/tensorflow/tools/api/golden/v2/tensorflow.data.pbtxt
@@ -13,6 +13,10 @@ tf_module {
mtype: "<type \'type\'>"
}
member {
+ name: "Options"
+ mtype: "<type \'type\'>"
+ }
+ member {
name: "TFRecordDataset"
mtype: "<class \'abc.ABCMeta\'>"
}