author     Rohan Jain <rohanj@google.com>  2018-09-26 17:33:52 -0700
committer  TensorFlower Gardener <gardener@tensorflow.org>  2018-09-26 17:38:11 -0700
commit     bfda65cc70526c919c57ef8321dd282e463ed8a3 (patch)
tree       600b4de887833957895b585ea759e86e76e7c017 /tensorflow/contrib/data
parent     e8ffb444411f20c435ac72a1beb866e39686fba0 (diff)
Removing _PerDeviceGenerator and MultiDeviceIterator from contrib now that they have been moved to core. I overlooked this in the CL that moved them to core.
PiperOrigin-RevId: 214699544
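
For reference, the classes removed below are meant to be used from their core location after this change. A minimal usage sketch, assuming the TF 1.x core module path tensorflow/python/data/ops/multi_device_iterator_ops.py (the constructor surface mirrors the contrib version removed in this diff; the device strings are illustrative):

    # Hedged sketch: the core module path and graph-mode session usage below
    # are assumptions based on where this CL says the classes were moved.
    import tensorflow as tf
    from tensorflow.python.data.ops import multi_device_iterator_ops

    dataset = tf.data.Dataset.range(8).batch(2)
    mdi = multi_device_iterator_ops.MultiDeviceIterator(
        dataset,
        devices=["/cpu:0", "/cpu:0"],  # illustrative; typically one per GPU
        prefetch_buffer_size=2)

    per_device = mdi.get_next()  # one element per device, in `devices` order

    with tf.Session() as sess:
      sess.run(mdi.initializer)
      print(sess.run(per_device))
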
Diffstat (limited to 'tensorflow/contrib/data')
-rw-r--r--  tensorflow/contrib/data/python/ops/prefetching_ops.py  189
1 file changed, 0 insertions, 189 deletions
diff --git a/tensorflow/contrib/data/python/ops/prefetching_ops.py b/tensorflow/contrib/data/python/ops/prefetching_ops.py
index f994425304..58395879e6 100644
--- a/tensorflow/contrib/data/python/ops/prefetching_ops.py
+++ b/tensorflow/contrib/data/python/ops/prefetching_ops.py
@@ -31,7 +31,6 @@ from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import function
 from tensorflow.python.framework import ops
 from tensorflow.python.ops import array_ops
-from tensorflow.python.ops import control_flow_ops
 from tensorflow.python.ops import functional_ops
 from tensorflow.python.ops import gen_dataset_ops as core_gen_dataset_ops
 from tensorflow.python.ops import resource_variable_ops
@@ -526,191 +525,3 @@ class _CopyToDeviceDataset(dataset_ops.UnaryDataset):
   @property
   def output_classes(self):
     return self._input_dataset.output_classes
-
-
-class _PerDeviceGenerator(dataset_ops.Dataset):
-  """A `dummy` generator dataset."""
-
-  def __init__(self, shard_num, multi_device_iterator_resource, incarnation_id,
-               source_device, target_device, output_shapes, output_types,
-               output_classes):
-    self._target_device = target_device
-    self._output_types = output_types
-    self._output_shapes = output_shapes
-    self._output_classes = output_classes
-    self._flat_output_shapes = nest.flatten(
-        sparse.as_dense_shapes(self._output_shapes, self._output_classes))
-    self._flat_output_types = nest.flatten(
-        sparse.as_dense_types(self._output_types, self._output_classes))
-
-    multi_device_iterator_string_handle = (
-        gen_dataset_ops.multi_device_iterator_to_string_handle(
-            multi_device_iterator_resource))
-
-    @function.Defun()
-    def _init_func():
-      return multi_device_iterator_string_handle
-
-    @function.Defun()
-    def _remote_init_func():
-      return functional_ops.remote_call(
-          target=source_device,
-          args=_init_func.captured_inputs,
-          Tout=[dtypes.string],
-          f=_init_func)
-
-    self._init_func = _remote_init_func
-    self._init_captured_args = _remote_init_func.captured_inputs
-
-    @function.Defun(dtypes.string)
-    def _next_func(string_handle):
-      multi_device_iterator = (
-          gen_dataset_ops.multi_device_iterator_from_string_handle(
-              string_handle=string_handle,
-              output_types=self._flat_output_types,
-              output_shapes=self._flat_output_shapes))
-      return gen_dataset_ops.multi_device_iterator_get_next_from_shard(
-          multi_device_iterator=multi_device_iterator,
-          shard_num=shard_num,
-          incarnation_id=incarnation_id,
-          output_types=self._flat_output_types,
-          output_shapes=self._flat_output_shapes)
-
-    @function.Defun(dtypes.string)
-    def _remote_next_func(string_handle):
-      return functional_ops.remote_call(
-          target=source_device,
-          args=[string_handle] + _next_func.captured_inputs,
-          Tout=self._flat_output_types,
-          f=_next_func)
-
-    self._next_func = _remote_next_func
-    self._next_captured_args = _remote_next_func.captured_inputs
-
-    @function.Defun(dtypes.string)
-    def _finalize_func(unused_string_handle):
-      return array_ops.constant(0, dtypes.int64)
-
-    @function.Defun(dtypes.string)
-    def _remote_finalize_func(string_handle):
-      return functional_ops.remote_call(
-          target=source_device,
-          args=[string_handle] + _finalize_func.captured_inputs,
-          Tout=[dtypes.int64],
-          f=_finalize_func)
-
-    self._finalize_func = _remote_finalize_func
-    self._finalize_captured_args = _remote_finalize_func.captured_inputs
-
-  def _as_variant_tensor(self):
-    with ops.device(self._target_device):
-      return core_gen_dataset_ops.generator_dataset(
-          self._init_captured_args,
-          self._next_captured_args,
-          self._finalize_captured_args,
-          init_func=self._init_func,
-          next_func=self._next_func,
-          finalize_func=self._finalize_func,
-          output_types=self._flat_output_types,
-          output_shapes=self._flat_output_shapes)
-
-  def _inputs(self):
-    # TODO(b/116506223): Determine which datasets should be used as inputs here.
-    return []
-
-  @property
-  def output_types(self):
-    return self._output_types
-
-  @property
-  def output_shapes(self):
-    return self._output_shapes
-
-  @property
-  def output_classes(self):
-    return self._output_classes
-
-
-class MultiDeviceIterator(object):
-  """An iterator over multiple devices."""
-
-  def __init__(self,
-               dataset,
-               devices,
-               max_buffer_size=1,
-               prefetch_buffer_size=1,
-               source_device="/cpu:0"):
-    """Constructs a MultiDeviceIterator.
-
-    Args:
-      dataset: The input dataset to be iterated over.
-      devices: The list of devices to fetch data to.
-      max_buffer_size: Maximum size of the host side per device buffer to keep.
-      prefetch_buffer_size: if > 0, then we set up a buffer on each device
-        to prefetch into.
-      source_device: The host device to place the `dataset` on.
-    """
-    self._dataset = dataset
-    self._devices = devices
-    self._source_device = source_device
-    self._source_device_tensor = ops.convert_to_tensor(source_device)
-
-    self._flat_output_shapes = nest.flatten(
-        sparse.as_dense_shapes(self._dataset.output_shapes,
-                               self._dataset.output_classes))
-    self._flat_output_types = nest.flatten(
-        sparse.as_dense_types(self._dataset.output_types,
-                              self._dataset.output_classes))
-
-    # Create the MultiDeviceIterator.
-    with ops.device(self._source_device):
-      self._multi_device_iterator_resource = (
-          gen_dataset_ops.multi_device_iterator(
-              devices=self._devices,
-              shared_name="",
-              container="",
-              output_types=self._flat_output_types,
-              output_shapes=self._flat_output_shapes))
-
-      # The incarnation ID is used to ensure consistency between the per-device
-      # iterators and the multi-device iterator.
-      self._incarnation_id = gen_dataset_ops.multi_device_iterator_init(
-          self._dataset._as_variant_tensor(),  # pylint: disable=protected-access
-          self._multi_device_iterator_resource,
-          max_buffer_size=max_buffer_size)
-
-    # TODO(rohanj): Explore the possibility of the MultiDeviceIterator to
-    # initialize the device side of the pipeline. This would allow the
-    # MultiDeviceIterator to choose, for example, to move some transformations
-    # into the device side from its input. It might be useful in rewriting.
-    # Create the per device iterators.
-    self._device_iterators = []
-    i = 0
-    for device in self._devices:
-      ds = _PerDeviceGenerator(
-          i, self._multi_device_iterator_resource, self._incarnation_id,
-          self._source_device_tensor, device, self._dataset.output_shapes,
-          self._dataset.output_types, self._dataset.output_classes)
-      if prefetch_buffer_size > 0:
-        ds = ds.prefetch(prefetch_buffer_size)
-      with ops.device(device):
-        self._device_iterators.append(ds.make_initializable_iterator())
-      i += 1
-
-    device_iterator_initializers = [
-        iterator.initializer for iterator in self._device_iterators
-    ]
-    self._initializer = control_flow_ops.group(*device_iterator_initializers)
-
-  def get_next(self):
-    result = []
-    i = 0
-    for device in self._devices:
-      with ops.device(device):
-        result.append(self._device_iterators[i].get_next())
-      i += 1
-    return result
-
-  @property
-  def initializer(self):
-    return self._initializer
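
A note on the semantics the removed code encodes: multi_device_iterator_init feeds elements to per-device shards, and each _PerDeviceGenerator pulls only its own shard_num via multi_device_iterator_get_next_from_shard. On my reading of that shard plumbing, the distribution is round-robin across devices; a plain-Python illustration of that assumption (not the real op, which runs in the C++ kernel):

    # Plain-Python illustration only: models the round-robin element-to-device
    # assignment that the shard_num plumbing above suggests. The real sharding
    # happens inside the MultiDeviceIterator kernel, not in Python.
    def round_robin_shards(elements, num_devices):
      shards = [[] for _ in range(num_devices)]
      for i, elem in enumerate(elements):
        shards[i % num_devices].append(elem)  # element i goes to device i % n
      return shards

    print(round_robin_shards(range(8), 2))  # [[0, 2, 4, 6], [1, 3, 5, 7]]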