Diffstat (limited to 'tensorflow/python/ops')
-rw-r--r--  tensorflow/python/ops/candidate_sampling_ops.py                    2
-rw-r--r--  tensorflow/python/ops/check_ops.py                                 2
-rw-r--r--  tensorflow/python/ops/control_flow_ops.py                          2
-rw-r--r--  tensorflow/python/ops/ctc_ops.py                                   2
-rw-r--r--  tensorflow/python/ops/data_flow_ops.py                           743
-rw-r--r--  tensorflow/python/ops/distributions/transformed_distribution.py    4
-rw-r--r--  tensorflow/python/ops/embedding_ops.py                             2
-rw-r--r--  tensorflow/python/ops/hidden_ops.txt                               1
-rw-r--r--  tensorflow/python/ops/image_ops.py                                 1
-rw-r--r--  tensorflow/python/ops/image_ops_impl.py                            3
-rw-r--r--  tensorflow/python/ops/image_ops_test.py                            4
-rw-r--r--  tensorflow/python/ops/io_ops.py                                   19
-rw-r--r--  tensorflow/python/ops/math_ops.py                                 25
-rw-r--r--  tensorflow/python/ops/math_ops_test.py                             4
-rw-r--r--  tensorflow/python/ops/rnn_cell_impl.py                             2
-rw-r--r--  tensorflow/python/ops/script_ops.py                                2
-rw-r--r--  tensorflow/python/ops/session_ops.py                               2
-rw-r--r--  tensorflow/python/ops/sparse_ops.py                                4
-rw-r--r--  tensorflow/python/ops/special_math_ops.py                          2
-rw-r--r--  tensorflow/python/ops/state_ops.py                                 2
-rw-r--r--  tensorflow/python/ops/variable_scope.py                            2
-rw-r--r--  tensorflow/python/ops/variables.py                                 4
22 files changed, 699 insertions, 135 deletions
diff --git a/tensorflow/python/ops/candidate_sampling_ops.py b/tensorflow/python/ops/candidate_sampling_ops.py
index 3053a333bf..d6294c24f5 100644
--- a/tensorflow/python/ops/candidate_sampling_ops.py
+++ b/tensorflow/python/ops/candidate_sampling_ops.py
@@ -249,7 +249,7 @@ def fixed_unigram_candidate_sampler(true_classes,
`distortion = 1.0` gives regular unigram sampling (as defined by the vocab
file), and `distortion = 0.0` gives a uniform distribution.
num_reserved_ids: Optionally some reserved IDs can be added in the range
- `[0, num_reserved_ids]` by the users. One use case is that a special
+ `[0, num_reserved_ids)` by the users. One use case is that a special
unknown word token is used as ID 0. These IDs will have a sampling
probability of 0.
num_shards: A sampler can be used to sample from a subset of the original
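A quick sketch of the corrected half-open semantics: with `num_reserved_ids=2`, IDs 0 and 1 (the range `[0, 2)`) are reserved and sampled with probability 0. The `unigrams` counts below are made up, and are assumed to cover only the non-reserved IDs 2..9:

```
import tensorflow as tf

sampled, true_exp, sampled_exp = tf.nn.fixed_unigram_candidate_sampler(
    true_classes=tf.constant([[4], [7]], dtype=tf.int64),
    num_true=1,
    num_sampled=5,
    unique=True,
    range_max=10,
    # Illustrative counts for IDs 2..9; IDs 0 and 1 are reserved.
    unigrams=[8.0, 4.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0],
    num_reserved_ids=2)
```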
diff --git a/tensorflow/python/ops/check_ops.py b/tensorflow/python/ops/check_ops.py
index 753999a672..1d853df86c 100644
--- a/tensorflow/python/ops/check_ops.py
+++ b/tensorflow/python/ops/check_ops.py
@@ -726,7 +726,7 @@ def _assert_ranks_condition(
# Attempt to statically determine the rank.
ranks_static = tuple([tensor_util.constant_value(rank) for rank in ranks])
- if None not in ranks_static:
+ if not any(r is None for r in ranks_static):
for rank_static in ranks_static:
if rank_static.ndim != 0:
raise ValueError('Rank must be a scalar.')
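The rewritten check sidesteps numpy's element-wise `==`: `tensor_util.constant_value` can return numpy arrays, and `in` compares with `==`, so the old membership test could raise instead of returning a boolean. A minimal reproduction of the pitfall:

```
import numpy as np

ranks_static = (np.array([2, 2]), None)
try:
  print(None not in ranks_static)
except ValueError as e:
  # "The truth value of an array with more than one element is ambiguous"
  print(e)

# The identity-based test works regardless of element type:
print(not any(r is None for r in ranks_static))  # False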
diff --git a/tensorflow/python/ops/control_flow_ops.py b/tensorflow/python/ops/control_flow_ops.py
index f1d34cb0e8..478e0a9472 100644
--- a/tensorflow/python/ops/control_flow_ops.py
+++ b/tensorflow/python/ops/control_flow_ops.py
@@ -326,7 +326,7 @@ def switch(data, pred, dtype=None, name=None):
def _SwitchRefOrTensor(data, pred, name="Switch"):
"""Forwards `data` to an output determined by `pred`.
- If `pred` is false, the `data` input is forwared to the first output.
+ If `pred` is false, the `data` input is forwarded to the first output.
Otherwise, the data goes to the second output.
This op handles `Tensor`s and `IndexedSlices`.
diff --git a/tensorflow/python/ops/ctc_ops.py b/tensorflow/python/ops/ctc_ops.py
index 4ea4d9ed2d..477c0d1cb4 100644
--- a/tensorflow/python/ops/ctc_ops.py
+++ b/tensorflow/python/ops/ctc_ops.py
@@ -37,7 +37,7 @@ def ctc_loss(labels, inputs, sequence_length,
This op implements the CTC loss as presented in the article:
[A. Graves, S. Fernandez, F. Gomez, J. Schmidhuber.
- Connectionist Temporal Classification: Labelling Unsegmented Sequence Data
+ Connectionist Temporal Classification: Labeling Unsegmented Sequence Data
with Recurrent Neural Networks. ICML 2006, Pittsburgh, USA, pp. 369-376.](http://www.cs.toronto.edu/~graves/icml_2006.pdf)
Input requirements:
diff --git a/tensorflow/python/ops/data_flow_ops.py b/tensorflow/python/ops/data_flow_ops.py
index c272a7115d..4eead79531 100644
--- a/tensorflow/python/ops/data_flow_ops.py
+++ b/tensorflow/python/ops/data_flow_ops.py
@@ -1,4 +1,4 @@
-# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -516,7 +516,7 @@ class QueueBase(object):
that would block will fail immediately.
If `cancel_pending_enqueues` is `True`, all pending requests will also
- be cancelled.
+ be canceled.
Args:
cancel_pending_enqueues: (Optional.) A boolean, defaulting to
@@ -988,7 +988,7 @@ class Barrier(object):
TakeMany operations that would block will fail immediately.
If `cancel_pending_enqueues` is `True`, all pending requests to the
- underlying queue will also be cancelled, and completing of already
+ underlying queue will also be canceled, and completing of already
started values is also not acceptable anymore.
Args:
@@ -1344,72 +1344,30 @@ class SparseConditionalAccumulator(ConditionalAccumulatorBase):
dense_shape=return_val.shape)
-class StagingArea(object):
- """Class for staging inputs. No ordering guarantees.
-
- A `StagingArea` is a TensorFlow data structure that stores tensors across
- multiple steps, and exposes operations that can put and get tensors.
-
- Each `StagingArea` element is a tuple of one or more tensors, where each
- tuple component has a static dtype, and may have a static shape.
-
- The capacity of a `StagingArea` is unbounded and supports multiple
- concurrent producers and consumers; and provides exactly-once delivery.
-
- Each element of a `StagingArea` is a fixed-length tuple of tensors whose
- dtypes are described by `dtypes`, and whose shapes are optionally described
- by the `shapes` argument.
-
- If the `shapes` argument is specified, each component of a staging area
- element must have the respective fixed shape. If it is
- unspecified, different elements may have different shapes,
- """
-
+class BaseStagingArea(object):
+ """Base class for Staging Areas."""
_identifier = 0
_lock = threading.Lock()
- def __init__(self, dtypes, shapes=None, names=None, shared_name=None):
- """Constructs a staging area object.
-
- The two optional lists, `shapes` and `names`, must be of the same length
- as `dtypes` if provided. The values at a given index `i` indicate the
- shape and name to use for the corresponding queue component in `dtypes`.
-
- The device scope at the time of object creation determines where the
- storage for the `StagingArea` will reside. Calls to `put` will incur a copy
- to this memory space, if necessary. Tensors returned by `get` will be
- placed according to the device scope when `get` is called.
-
- Args:
- dtypes: A list of types. The length of dtypes must equal the number
- of tensors in each element.
- shapes: (Optional.) Constraints on the shapes of tensors in an element.
- A list of shape tuples or None. This list is the same length
- as dtypes. If the shape of any tensors in the element are constrained,
- all must be; shapes can be None if the shapes should not be constrained.
- names: (Optional.) If provided, the `get()` and
- `put()` methods will use dictionaries with these names as keys.
- Must be None or a list or tuple of the same length as `dtypes`.
- shared_name: (Optional.) A name to be used for the shared object. By
- passing the same name to two different python objects they will share
- the underlying staging area. Must be a string.
-
- Raises:
- ValueError: If one of the arguments is invalid.
- """
+ def __init__(self, dtypes, shapes=None, names=None, shared_name=None,
+ capacity=0, memory_limit=0):
if shared_name is None:
- self._name = ops.get_default_graph().unique_name("StagingArea")
+ self._name = (ops.get_default_graph()
+ .unique_name(self.__class__.__name__))
elif isinstance(shared_name, six.string_types):
self._name = shared_name
else:
raise ValueError("shared_name must be a string")
+
self._dtypes = dtypes
+
if shapes is not None:
if len(shapes) != len(dtypes):
raise ValueError("StagingArea shapes must be the same length as dtypes")
self._shapes = [tensor_shape.TensorShape(s) for s in shapes]
else:
self._shapes = [tensor_shape.unknown_shape() for _ in self._dtypes]
+
if names is not None:
if len(names) != len(dtypes):
raise ValueError("StagingArea names must be the same length as dtypes")
@@ -1417,6 +1375,9 @@ class StagingArea(object):
else:
self._names = None
+ self._capacity = capacity
+ self._memory_limit = memory_limit
+
# all get and put ops must colocate with this op
with ops.name_scope("%s_root" % self._name):
self._coloc_op = control_flow_ops.no_op()
@@ -1441,52 +1402,141 @@ class StagingArea(object):
"""The list of names for each component of a staging area element."""
return self._names
- def _check_put_dtypes(self, vals):
+ @property
+ def capacity(self):
+ """The maximum number of elements of this staging area."""
+ return self._capacity
+
+ @property
+ def memory_limit(self):
+ """The maximum number of bytes of this staging area."""
+ return self._memory_limit
+
+ def _check_put_dtypes(self, vals, indices=None):
"""Validate and convert `vals` to a list of `Tensor`s.
The `vals` argument can be a Tensor, a list or tuple of tensors, or a
dictionary with tensor values.
+ If `vals` is a list, then the appropriate indices associated with the
+ values must be provided.
+
If it is a dictionary, the staging area must have been constructed with a
`names` attribute and the dictionary keys must match the staging area names.
+ `indices` will be inferred from the dictionary keys.
If the staging area was constructed with a `names` attribute, `vals` must
be a dictionary.
+ Checks that the dtype and shape of each value matches that
+ of the staging area.
+
Args:
vals: A tensor, a list or tuple of tensors, or a dictionary.
Returns:
- A list of `Tensor` objects.
+ A (tensors, indices) tuple where `tensors` is a list of `Tensor` objects
+        and `indices` is a list of indices associated with the tensors.
Raises:
- ValueError: If `vals` is invalid.
+ ValueError: If `vals` or `indices` is invalid.
"""
if isinstance(vals, dict):
if not self._names:
raise ValueError(
"Staging areas must have names to enqueue a dictionary")
- if sorted(self._names) != sorted(vals.keys()):
+ if not set(vals.keys()).issubset(self._names):
raise ValueError("Keys in dictionary to put do not match names "
"of staging area. Dictionary: (%s), Queue: (%s)" %
(sorted(vals.keys()), sorted(self._names)))
# The order of values in `self._names` indicates the order in which the
# tensors in the dictionary `vals` must be listed.
- vals = [vals[k] for k in self._names]
+      vals, indices, _ = zip(*[(vals[k], i, k) for i, k in enumerate(self._names)
+ if k in vals])
else:
if self._names:
raise ValueError("You must enqueue a dictionary in a staging area "
"with names")
+
+ if indices is None:
+ raise ValueError("Indices must be supplied when inserting a list "
+ "of tensors")
+
+ if len(indices) != len(vals):
+ raise ValueError("Number of indices '%s' doesn't match "
+ "number of values '%s'")
+
if not isinstance(vals, (list, tuple)):
vals = [vals]
+ indices = [0]
+
+ # Sanity check number of values
+      if len(vals) > len(self._dtypes):
+        raise ValueError("Unexpected number of inputs '%s' vs '%s'" % (
+                          len(vals), len(self._dtypes)))
tensors = []
- for i, (val, dtype) in enumerate(zip(vals, self._dtypes)):
- tensors.append(
- ops.convert_to_tensor(
- val, dtype=dtype, name="component_%d" % i))
+
+ for val, i in zip(vals, indices):
+ dtype, shape = self._dtypes[i], self._shapes[i]
+ # Check dtype
+      if val.dtype != dtype:
+        raise ValueError("Datatypes do not match. '%s' != '%s'" % (
+          str(val.dtype), str(dtype)))
+
+ # Check shape
+ val.get_shape().assert_is_compatible_with(shape)
+
+ tensors.append(ops.convert_to_tensor(val, dtype=dtype,
+ name="component_%d" % i))
+
+ return tensors, indices
+
+ def _create_device_transfers(self, tensors):
+ """Encode inter-device transfers if the current device
+    is not the same as the Staging Area's device.
+ """
+
+ if not isinstance(tensors, (tuple, list)):
+ tensors = [tensors]
+
+ curr_device_scope = control_flow_ops.no_op().device
+
+ if curr_device_scope != self._coloc_op.device:
+ tensors = [array_ops.identity(t) for t in tensors]
return tensors
+ def _get_return_value(self, tensors, indices):
+ """Return the value to return from a get op.
+
+ If the staging area has names, return a dictionary with the
+ names as keys. Otherwise return either a single tensor
+ or a list of tensors depending on the length of `tensors`.
+
+ Args:
+ tensors: List of tensors from the get op.
+ indices: Indices of associated names and shapes
+
+ Returns:
+ A single tensor, a list of tensors, or a dictionary
+ of tensors.
+ """
+
+ tensors = self._create_device_transfers(tensors)
+
+ # Sets shape
+ for output, i in zip(tensors, indices):
+ output.set_shape(self._shapes[i])
+
+ if self._names:
+ # The returned values in `tensors` are in the same order as
+ # the names in `self._names`.
+ return {self._names[i]: t for t, i in zip(tensors, indices)}
+ elif len(tensors) == 1:
+ return tensors[0]
+ else:
+ return tensors
+
def _scope_vals(self, vals):
"""Return a list of values to pass to `name_scope()`.
@@ -1503,9 +1553,86 @@ class StagingArea(object):
else:
return [vals]
+class StagingArea(BaseStagingArea):
+ """Class for staging inputs. No ordering guarantees.
+
+ A `StagingArea` is a TensorFlow data structure that stores tensors across
+ multiple steps, and exposes operations that can put and get tensors.
+
+ Each `StagingArea` element is a tuple of one or more tensors, where each
+ tuple component has a static dtype, and may have a static shape.
+
+ The capacity of a `StagingArea` may be bounded or unbounded.
+  It supports multiple concurrent producers and consumers, and
+ provides exactly-once delivery.
+
+ Each element of a `StagingArea` is a fixed-length tuple of tensors whose
+ dtypes are described by `dtypes`, and whose shapes are optionally described
+ by the `shapes` argument.
+
+ If the `shapes` argument is specified, each component of a staging area
+ element must have the respective fixed shape. If it is
+  unspecified, different elements may have different shapes.
+
+  It can be configured with a capacity, in which case
+  put(values) will block until space becomes available.
+
+  Similarly, it can be configured with a memory limit, which
+  will block put(values) until space is available.
+ This is mostly useful for limiting the number of tensors on
+ devices such as GPUs.
+
+  All get() and peek() commands block if the requested data
+ is not present in the Staging Area.
+
+ """
+
+ def __init__(self, dtypes, shapes=None, names=None, shared_name=None,
+ capacity=0, memory_limit=0):
+ """Constructs a staging area object.
+
+ The two optional lists, `shapes` and `names`, must be of the same length
+ as `dtypes` if provided. The values at a given index `i` indicate the
+ shape and name to use for the corresponding queue component in `dtypes`.
+
+ The device scope at the time of object creation determines where the
+ storage for the `StagingArea` will reside. Calls to `put` will incur a copy
+ to this memory space, if necessary. Tensors returned by `get` will be
+ placed according to the device scope when `get` is called.
+
+ Args:
+ dtypes: A list of types. The length of dtypes must equal the number
+ of tensors in each element.
+      capacity: (Optional.) Maximum number of elements.
+        An integer. If zero, the Staging Area is unbounded.
+      memory_limit: (Optional.) Maximum number of bytes of all tensors
+        in the Staging Area.
+        An integer. If zero, the Staging Area is unbounded.
+ shapes: (Optional.) Constraints on the shapes of tensors in an element.
+ A list of shape tuples or None. This list is the same length
+ as dtypes. If the shape of any tensors in the element are constrained,
+ all must be; shapes can be None if the shapes should not be constrained.
+ names: (Optional.) If provided, the `get()` and
+ `put()` methods will use dictionaries with these names as keys.
+ Must be None or a list or tuple of the same length as `dtypes`.
+ shared_name: (Optional.) A name to be used for the shared object. By
+ passing the same name to two different python objects they will share
+ the underlying staging area. Must be a string.
+
+ Raises:
+ ValueError: If one of the arguments is invalid.
+ """
+
+ super(StagingArea, self).__init__(dtypes, shapes,
+ names, shared_name,
+ capacity, memory_limit)
+
def put(self, values, name=None):
"""Create an op that places a value into the staging area.
+ This operation will block if the `StagingArea` has reached
+ its capacity.
+
Args:
values: Tensor (or a tuple of Tensors) to place into the staging area.
name: A name for the operation (optional).
@@ -1518,46 +1645,25 @@ class StagingArea(object):
"""
with ops.name_scope(name, "%s_put" % self._name,
self._scope_vals(values)) as scope:
- vals = self._check_put_dtypes(values)
- if len(values) != len(self._dtypes):
- raise ValueError("Unexpected number of inputs " + str(len(values)) +
- "vs " + str(len(self._dtypes)))
- for val, dtype in zip(vals, self._dtypes):
- if val.dtype != dtype:
- raise ValueError("Datatypes do not match. " + str(val.dtype) + " != "
- + str(dtype))
- for val, shape in zip(vals, self._shapes):
- val.get_shape().assert_is_compatible_with(shape)
+ # Hard-code indices for this staging area
+ indices = (list(six.moves.range(len(values)))
+ if isinstance(values, (list, tuple)) else None)
+ vals, _ = self._check_put_dtypes(values, indices)
with ops.colocate_with(self._coloc_op):
op = gen_data_flow_ops.stage(values=vals, shared_name=self._name,
- name=scope)
+ name=scope, capacity=self._capacity,
+ memory_limit=self._memory_limit)
return op
- def _get_return_value(self, tensors):
- """Return the value to return from a get op.
-
- If the staging area has names, return a dictionary with the
- names as keys. Otherwise return either a single tensor
- or a list of tensors depending on the length of `tensors`.
-
- Args:
- tensors: List of tensors from the get op.
+ def __internal_get(self, get_fn, name):
+ with ops.colocate_with(self._coloc_op):
+ ret = get_fn()
- Returns:
- A single tensor, a list of tensors, or a dictionary
- of tensors.
- """
- if self._names:
- # The returned values in `tensors` are in the same order as
- # the names in `self._names`.
- return {n: tensors[i] for i, n in enumerate(self._names)}
- elif len(tensors) == 1:
- return tensors[0]
- else:
- return tensors
+ indices = list(six.moves.range(len(self._dtypes))) # Hard coded
+ return self._get_return_value(ret, indices)
def get(self, name=None):
"""Gets one element from this staging area.
@@ -1584,19 +1690,448 @@ class StagingArea(object):
if name is None:
name = "%s_get" % self._name
+ fn = lambda: gen_data_flow_ops.unstage(dtypes=self._dtypes,
+ shared_name=self._name, name=name,
+ capacity=self._capacity,
+ memory_limit=self._memory_limit)
+
+ return self.__internal_get(fn, name)
+
+ def peek(self, index, name=None):
+ """Peeks at an element in the staging area.
+
+ If the staging area is too small to contain the element at
+ the specified index, it will block until enough elements
+ are inserted to complete the operation.
+
+ The placement of the returned tensor will be determined by
+ the current device scope when this function is called.
+
+ Args:
+ index: The index of the tensor within the staging area
+ to look up.
+ name: A name for the operation (optional).
+
+ Returns:
+      The tuple of tensors at the given index.
+ """
+ if name is None:
+ name = "%s_peek" % self._name
+
+ fn = lambda: gen_data_flow_ops.stage_peek(index,
+ dtypes=self._dtypes, shared_name=self._name,
+ name=name, capacity=self._capacity,
+ memory_limit=self._memory_limit)
+
+ return self.__internal_get(fn, name)
+
+ def size(self, name=None):
+ """Returns the number of elements in the staging area.
+
+ Args:
+ name: A name for the operation (optional)
+
+ Returns:
+ The created op
+ """
+ if name is None:
+ name = "%s_size" % self._name
+
+ return gen_data_flow_ops.stage_size(name=name, shared_name=self._name,
+ dtypes=self._dtypes, capacity=self._capacity,
+ memory_limit=self._memory_limit)
+
+ def clear(self, name=None):
+ """Clears the staging area.
+
+ Args:
+ name: A name for the operation (optional)
+
+ Returns:
+ The created op
+ """
+ if name is None:
+ name = "%s_clear" % self._name
+
+ return gen_data_flow_ops.stage_clear(name=name, shared_name=self._name,
+ dtypes=self._dtypes, capacity=self._capacity,
+ memory_limit=self._memory_limit)
+
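A usage sketch of the bounded behavior this diff adds, assuming the `tf.contrib.staging` export of this class:

```
import tensorflow as tf

area = tf.contrib.staging.StagingArea(
    dtypes=[tf.float32], shapes=[[]], capacity=2)

put_op = area.put([tf.constant(1.0)])
peek_op = area.peek(0)   # inspect the element at index 0 without removing it
get_op = area.get()
size_op = area.size()

with tf.Session() as sess:
  sess.run(put_op)          # one element staged
  sess.run(put_op)          # two elements; a third put() would block
  print(sess.run(size_op))  # 2
  print(sess.run(peek_op))  # 1.0 (element stays in the area)
  print(sess.run(get_op))   # 1.0 (removes an element)
```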
+class MapStagingArea(BaseStagingArea):
+ """
+ A `MapStagingArea` is a TensorFlow data structure that stores tensors across
+ multiple steps, and exposes operations that can put and get tensors.
+
+ Each `MapStagingArea` element is a (key, value) pair.
+ Only int64 keys are supported, other types should be
+ hashed to produce a key.
+ Values are a tuple of one or more tensors.
+ Each tuple component has a static dtype,
+ and may have a static shape.
+
+ The capacity of a `MapStagingArea` may be bounded or unbounded.
+  It supports multiple concurrent producers and consumers, and
+ provides exactly-once delivery.
+
+ Each value tuple of a `MapStagingArea` is a fixed-length tuple of tensors whose
+ dtypes are described by `dtypes`, and whose shapes are optionally described
+ by the `shapes` argument.
+
+ If the `shapes` argument is specified, each component of a staging area
+ element must have the respective fixed shape. If it is
+  unspecified, different elements may have different shapes.
+
+ It behaves like an associative container with support for:
+
+ - put(key, values)
+ - peek(key) like dict.get(key)
+ - get(key) like dict.pop(key)
+ - get(key=None) like dict.popitem()
+ - size()
+ - clear()
+
+  If ordered, a tree structure ordered by key will be used and
+  get(key=None) will remove (key, value) pairs in increasing key order.
+  Otherwise, a hashtable is used.
+
+  It can be configured with a capacity, in which case
+  put(key, values) will block until space becomes available.
+
+  Similarly, it can be configured with a memory limit, which
+  will block put(key, values) until space is available.
+ This is mostly useful for limiting the number of tensors on
+ devices such as GPUs.
+
+ All get() and peek() commands block if the requested
+ (key, value) pair is not present in the staging area.
+
+ Partial puts are supported and will be placed in an incomplete
+ map until such time as all values associated with the key have
+ been inserted. Once completed, this (key, value) pair will be
+ inserted into the map. Data in the incomplete map
+  counts towards the memory limit, but not towards the capacity limit.
+
+ Partial gets from the map are also supported.
+ This removes the partially requested tensors from the entry,
+ but the entry is only removed from the map once all tensors
+ associated with it are removed.
+ """
+
+ def __init__(self, dtypes, shapes=None, names=None, shared_name=None,
+ ordered=False, capacity=0, memory_limit=0):
+ """
+ Args:
+ dtypes: A list of types. The length of dtypes must equal the number
+ of tensors in each element.
+      capacity: (Optional.) Maximum number of elements.
+        An integer. If zero, the Staging Area is unbounded.
+      memory_limit: (Optional.) Maximum number of bytes of all tensors
+        in the Staging Area (excluding keys).
+        An integer. If zero, the Staging Area is unbounded.
+ ordered: (Optional.) If True the underlying data structure
+ is a tree ordered on key. Otherwise assume a hashtable.
+ shapes: (Optional.) Constraints on the shapes of tensors in an element.
+ A list of shape tuples or None. This list is the same length
+ as dtypes. If the shape of any tensors in the element are constrained,
+ all must be; shapes can be None if the shapes should not be constrained.
+ names: (Optional.) If provided, the `get()` and
+ `put()` methods will use dictionaries with these names as keys.
+ Must be None or a list or tuple of the same length as `dtypes`.
+ shared_name: (Optional.) A name to be used for the shared object. By
+ passing the same name to two different python objects they will share
+ the underlying staging area. Must be a string.
+
+ Raises:
+ ValueError: If one of the arguments is invalid.
+
+ """
+
+ super(MapStagingArea, self).__init__(dtypes, shapes,
+ names, shared_name,
+ capacity, memory_limit)
+
+ # Defer to different methods depending if the map is ordered
+ self._ordered = ordered
+
+ if ordered:
+ self._put_fn = gen_data_flow_ops.ordered_map_stage
+ self._pop_fn = gen_data_flow_ops.ordered_map_unstage
+ self._popitem_fn = gen_data_flow_ops.ordered_map_unstage_no_key
+ self._peek_fn = gen_data_flow_ops.ordered_map_peek
+ self._size_fn = gen_data_flow_ops.ordered_map_size
+ self._incomplete_size_fn = gen_data_flow_ops.ordered_map_incomplete_size
+ self._clear_fn = gen_data_flow_ops.ordered_map_clear
+ else:
+ self._put_fn = gen_data_flow_ops.map_stage
+ self._pop_fn = gen_data_flow_ops.map_unstage
+ self._popitem_fn = gen_data_flow_ops.map_unstage_no_key
+ self._peek_fn = gen_data_flow_ops.map_peek
+ self._size_fn = gen_data_flow_ops.map_size
+ self._incomplete_size_fn = gen_data_flow_ops.map_incomplete_size
+ self._clear_fn = gen_data_flow_ops.map_clear
+
+ def put(self, key, vals, indices=None, name=None):
+ """
+ Create an op that stores the (key, vals) pair in the staging area.
+
+    Incomplete puts are possible, preferably using a dictionary for vals,
+    as the appropriate dtypes, shapes and indices can be inferred from
+    the dictionary keys. If vals is a list or tuple, indices must
+ also be specified so that the op knows at which element position
+ to perform the insert.
+
+ This operation will block if the capacity or memory limit of this
+ container is reached.
+
+ Args:
+ key: Key associated with the data
+ vals: Tensor (or a dict/tuple of Tensors) to place
+ into the staging area.
+ indices: (Optional) if vals is a tuple/list, this is required.
+ name: A name for the operation (optional)
+
+ Returns:
+ The created op
+
+ Raises:
+ ValueError: If the number or type of inputs don't match the staging area.
+ """
+
+ with ops.name_scope(name, "%s_put" % self._name,
+ self._scope_vals(vals)) as scope:
+
+ vals, indices = self._check_put_dtypes(vals, indices)
+
+ with ops.colocate_with(self._coloc_op):
+ op = self._put_fn(key, indices, vals, dtypes=self._dtypes,
+ shared_name=self._name, name=scope,
+ capacity=self._capacity,
+ memory_limit=self._memory_limit)
+ return op
+
+ def _get_indices_and_dtypes(self, indices=None):
+ if indices is None:
+ indices = list(six.moves.range(len(self._dtypes)))
+
+ if not isinstance(indices, (tuple, list)):
+ raise TypeError("Invalid indices type '%s'" % type(indices))
+
+ if len(indices) == 0:
+ raise ValueError("Empty indices")
+
+ if all(isinstance(i, str) for i in indices):
+ if self._names is None:
+ raise ValueError("String indices provided '%s', but this Staging Area "
+ "was not created with names." % indices)
+
+      for n in indices:
+        if n not in self._names:
+          raise ValueError("Named index '%s' not in "
+                           "Staging Area names '%s'" % (n, self._names))
+
+      indices = [self._names.index(n) for n in indices]
+ elif all(isinstance(i, int) for i in indices):
+ pass
+ else:
+ raise TypeError("Mixed types in indices '%s'. "
+ "May only be str or int" % indices)
+
+ dtypes = [self._dtypes[i] for i in indices]
+
+ return indices, dtypes
+
+ def peek(self, key, indices=None, name=None):
+ """
+ Peeks at staging area data associated with the key.
+
+ If the key is not in the staging area, it will block
+ until the associated (key, value) is inserted.
+
+ Args:
+ key: Key associated with the required data
+ indices: Partial list of tensors to retrieve (optional).
+ A list of integer or string indices.
+ String indices are only valid if the Staging Area
+ has names associated with it.
+ name: A name for the operation (optional)
+
+ Returns:
+ The created op
+ """
+
+ if name is None:
+ name = "%s_pop" % self._name
+
+ indices, dtypes = self._get_indices_and_dtypes(indices)
+
+ with ops.colocate_with(self._coloc_op):
+ result = self._peek_fn(key, shared_name=self._name,
+ indices=indices,
+ dtypes=dtypes,
+ name=name,
+ capacity=self._capacity,
+ memory_limit=self._memory_limit)
+
+ return self._get_return_value(result, indices)
+
+ def get(self, key=None, indices=None, name=None):
+ """
+ If the key is provided, the associated (key, value)
+ is returned from the staging area. If the key is not
+ in the staging area, this method will block until
+ the associated (key, value) is inserted.
+
+ If no key is provided and the staging area is ordered,
+ the (key, value) with the smallest key will be returned.
+ Otherwise, a random (key, value) will be returned.
+
+ If the staging area is empty when this operation executes,
+ it will block until there is an element to dequeue.
+
+ Args:
+ key: Key associated with the required data (Optional)
+ indices: Partial list of tensors to retrieve (optional).
+ A list of integer or string indices.
+ String indices are only valid if the Staging Area
+ has names associated with it.
+ name: A name for the operation (optional)
+
+ Returns:
+ The created op
+ """
+ if key is None:
+ return self._popitem(indices=indices, name=name)
+ else:
+ return self._pop(key, indices=indices, name=name)
+
+ def _pop(self, key, indices=None, name=None):
+ """
+    Remove and return the associated (key, value)
+    from the staging area. If the key is not
+ in the staging area, this method will block until
+ the associated (key, value) is inserted.
+
+ Args:
+ key: Key associated with the required data
+ indices: Partial list of tensors to retrieve (optional).
+ A list of integer or string indices.
+ String indices are only valid if the Staging Area
+ has names associated with it.
+ name: A name for the operation (optional)
+
+ Returns:
+ The created op
+ """
+ if name is None:
+ name = "%s_get" % self._name
+
+ indices, dtypes = self._get_indices_and_dtypes(indices)
+
with ops.colocate_with(self._coloc_op):
- ret = gen_data_flow_ops.unstage(dtypes=self._dtypes,
- shared_name=self._name, name=name)
+ result = self._pop_fn(key, shared_name=self._name,
+ indices=indices,
+ dtypes=dtypes,
+ name=name,
+ capacity=self._capacity,
+ memory_limit=self._memory_limit)
- curr_device_scope = control_flow_ops.no_op().device
- if curr_device_scope != self._coloc_op.device:
- for i in range(len(ret)):
- ret[i] = array_ops.identity(ret[i])
+ return key, self._get_return_value(result, indices)
- for output, shape in zip(ret, self._shapes):
- output.set_shape(shape)
+ def _popitem(self, indices=None, name=None):
+ """
+ If the staging area is ordered,
+ the (key, value) with the smallest key will be returned.
+ Otherwise, a random (key, value) will be returned.
+
+ If the staging area is empty when this operation executes,
+ it will block until there is an element to dequeue.
+
+ Args:
+ indices: Partial list of tensors to retrieve (optional).
+ A list of integer or string indices.
+ String indices are only valid if the Staging Area
+ has names associated with it.
+ name: A name for the operation (optional)
+
+ Returns:
+ The created op
+ """
+ if name is None:
+ name = "%s_get_nokey" % self._name
+
+ indices, dtypes = self._get_indices_and_dtypes(indices)
+
+ with ops.colocate_with(self._coloc_op):
+ key, result = self._popitem_fn(shared_name=self._name,
+ indices=indices,
+ dtypes=dtypes,
+ name=name,
+ capacity=self._capacity,
+ memory_limit=self._memory_limit)
+
+ # Separate keys and results out from
+ # underlying namedtuple
+ key = self._create_device_transfers(key)[0]
+ result = self._get_return_value(result, indices)
+
+ return key, result
+
+ def size(self, name=None):
+ """
+ Returns the number of elements in the staging area.
+
+ Args:
+ name: A name for the operation (optional)
+
+ Returns:
+ The created op
+ """
+ if name is None:
+ name = "%s_size" % self._name
+
+ return self._size_fn(shared_name=self._name,
+ name=name, dtypes=self._dtypes,
+ capacity=self._capacity,
+ memory_limit=self._memory_limit)
+
+ def incomplete_size(self, name=None):
+ """
+ Returns the number of incomplete elements in the staging area.
+
+ Args:
+ name: A name for the operation (optional)
+
+ Returns:
+ The created op
+ """
+ if name is None:
+ name = "%s_incomplete_size" % self._name
+
+ return self._incomplete_size_fn(shared_name=self._name,
+ name=name, dtypes=self._dtypes,
+ capacity=self._capacity,
+ memory_limit=self._memory_limit)
+
+ def clear(self, name=None):
+ """
+ Clears the staging area.
+
+ Args:
+ name: A name for the operation (optional)
+
+ Returns:
+ The created op
+ """
+ if name is None:
+ name = "%s_clear" % self._name
- return self._get_return_value(ret)
+ return self._clear_fn(shared_name=self._name,
+ name=name, dtypes=self._dtypes,
+ capacity=self._capacity,
+ memory_limit=self._memory_limit)
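A usage sketch of the new associative container, assuming the module path `tensorflow.python.ops.data_flow_ops`; with `ordered=True`, `get()` with no key pops (key, value) pairs in increasing key order:

```
import tensorflow as tf
from tensorflow.python.ops import data_flow_ops

m = data_flow_ops.MapStagingArea(
    dtypes=[tf.float32, tf.int32], names=["x", "y"], ordered=True)

key = tf.constant(1, dtype=tf.int64)
put_op = m.put(key, {"x": tf.constant(2.0), "y": tf.constant(3)})
peek_x = m.peek(key, indices=["x"])   # partial peek; entry stays in the map
popped_key, vals = m.get()            # pops the smallest key

with tf.Session() as sess:
  sess.run(put_op)
  print(sess.run(peek_x))              # {'x': 2.0}
  print(sess.run([popped_key, vals]))  # [1, {'x': 2.0, 'y': 3}]
```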
class RecordInput(object):
diff --git a/tensorflow/python/ops/distributions/transformed_distribution.py b/tensorflow/python/ops/distributions/transformed_distribution.py
index 09b26a9fb7..1be3819569 100644
--- a/tensorflow/python/ops/distributions/transformed_distribution.py
+++ b/tensorflow/python/ops/distributions/transformed_distribution.py
@@ -339,7 +339,7 @@ class TransformedDistribution(distribution_lib.Distribution):
self.distribution.event_shape_tensor()))
def _event_shape(self):
- # If there's a chance that the event_shape has been overriden, we return
+ # If there's a chance that the event_shape has been overridden, we return
# what we statically know about the `event_shape_override`. This works
# because: `_is_maybe_event_override` means `static_override` is `None` or a
# non-empty list, i.e., we don't statically know the `event_shape` or we do.
@@ -360,7 +360,7 @@ class TransformedDistribution(distribution_lib.Distribution):
self.distribution.batch_shape_tensor())
def _batch_shape(self):
- # If there's a chance that the batch_shape has been overriden, we return
+ # If there's a chance that the batch_shape has been overridden, we return
# what we statically know about the `batch_shape_override`. This works
# because: `_is_maybe_batch_override` means `static_override` is `None` or a
# non-empty list, i.e., we don't statically know the `batch_shape` or we do.
diff --git a/tensorflow/python/ops/embedding_ops.py b/tensorflow/python/ops/embedding_ops.py
index 6930f9af05..4c94f9e9b5 100644
--- a/tensorflow/python/ops/embedding_ops.py
+++ b/tensorflow/python/ops/embedding_ops.py
@@ -97,7 +97,7 @@ def embedding_lookup(params, ids, partition_strategy="mod", name=None,
Raises:
ValueError: If `params` is empty.
"""
- if params in (None, (), []):
+ if params is None or params in ((), []):
raise ValueError("Need at least one param")
if isinstance(params, variables.PartitionedVariable):
params = list(params) # Iterate to get the underlying Variables.
diff --git a/tensorflow/python/ops/hidden_ops.txt b/tensorflow/python/ops/hidden_ops.txt
index 06adfc5066..553e0dc135 100644
--- a/tensorflow/python/ops/hidden_ops.txt
+++ b/tensorflow/python/ops/hidden_ops.txt
@@ -191,6 +191,7 @@ WholeFileReader
TextLineReaderV2
TFRecordReaderV2
WholeFileReaderV2
+LMDBReader
# linalg_ops
BatchCholesky
diff --git a/tensorflow/python/ops/image_ops.py b/tensorflow/python/ops/image_ops.py
index 75c67dcb3c..51d0276140 100644
--- a/tensorflow/python/ops/image_ops.py
+++ b/tensorflow/python/ops/image_ops.py
@@ -60,6 +60,7 @@ See the @{$python/image} guide.
@@per_image_standardization
@@draw_bounding_boxes
@@non_max_suppression
+@@non_max_suppression_v2
@@sample_distorted_bounding_box
@@total_variation
"""
diff --git a/tensorflow/python/ops/image_ops_impl.py b/tensorflow/python/ops/image_ops_impl.py
index b16c1863dd..65a1399c5b 100644
--- a/tensorflow/python/ops/image_ops_impl.py
+++ b/tensorflow/python/ops/image_ops_impl.py
@@ -52,6 +52,7 @@ ops.NotDifferentiable('SampleDistortedBoundingBox')
# latent bugs here.
ops.NotDifferentiable('ExtractGlimpse')
ops.NotDifferentiable('NonMaxSuppression')
+ops.NotDifferentiable('NonMaxSuppressionV2')
def _assert(cond, ex_type, msg):
@@ -281,7 +282,7 @@ def flip_left_right(image):
def flip_up_down(image):
- """Flip an image horizontally (upside down).
+ """Flip an image vertically (upside down).
Outputs the contents of `image` flipped along the first dimension, which is
`height`.
diff --git a/tensorflow/python/ops/image_ops_test.py b/tensorflow/python/ops/image_ops_test.py
index 492dbe6d13..5588d18ef1 100644
--- a/tensorflow/python/ops/image_ops_test.py
+++ b/tensorflow/python/ops/image_ops_test.py
@@ -1449,7 +1449,7 @@ class PadToBoundingBoxTest(test_util.TensorFlowTestCase):
use_tensor_inputs_options=[False])
# The original error message does not contain back slashes. However, they
- # are added by either the assert op or the runtime. If this behaviour
+ # are added by either the assert op or the runtime. If this behavior
# changes in the future, the match string will also need to be changed.
self._assertRaises(
x,
@@ -2281,7 +2281,7 @@ class ResizeImageWithCropOrPadTest(test_util.TensorFlowTestCase):
use_tensor_inputs_options=[False])
# The original error message does not contain back slashes. However, they
- # are added by either the assert op or the runtime. If this behaviour
+ # are added by either the assert op or the runtime. If this behavior
# changes in the future, the match string will also need to be changed.
self._assertRaises(
x,
diff --git a/tensorflow/python/ops/io_ops.py b/tensorflow/python/ops/io_ops.py
index 68ecc219e4..0b1a77969a 100644
--- a/tensorflow/python/ops/io_ops.py
+++ b/tensorflow/python/ops/io_ops.py
@@ -443,6 +443,25 @@ class TFRecordReader(ReaderBase):
ops.NotDifferentiable("TFRecordReader")
+class LMDBReader(ReaderBase):
+ """A Reader that outputs the records from a LMDB file.
+
+ See ReaderBase for supported methods.
+ """
+ def __init__(self, name=None, options=None):
+ """Create a LMDBReader.
+
+ Args:
+ name: A name for the operation (optional).
+      options: (Optional.) Currently unused.
+ """
+ rr = gen_io_ops._lmdb_reader(name=name)
+ super(LMDBReader, self).__init__(rr)
+
+
+ops.NotDifferentiable("LMDBReader")
+
+
class IdentityReader(ReaderBase):
"""A Reader that outputs the queued work as both the key and value.
diff --git a/tensorflow/python/ops/math_ops.py b/tensorflow/python/ops/math_ops.py
index 3b7332e863..89b7746e71 100644
--- a/tensorflow/python/ops/math_ops.py
+++ b/tensorflow/python/ops/math_ops.py
@@ -208,19 +208,25 @@ argmin.__doc__ = (gen_math_ops.arg_min.__doc__.replace("dimensions",
def abs(x, name=None):
r"""Computes the absolute value of a tensor.
- Given a tensor of real numbers `x`, this operation returns a tensor
- containing the absolute value of each element in `x`. For example, if x is
- an input element and y is an output element, this operation computes
- \\(y = |x|\\).
+  Given a tensor `x` of real or complex numbers, this operation returns a
+  tensor containing the absolute value of each element in `x`, i.e.
+  \\(y = |x|\\). For complex numbers of the form \\(a + bj\\), the
+  absolute value is computed as \\( \sqrt{a^2 + b^2}\\). For example:
+  ```
+  # tensor 'x' is [[-2.25 + 4.75j], [-3.25 + 5.75j]]
+  tf.abs(x) ==> [5.25594902, 6.60492229]
+  ```
Args:
- x: A `Tensor` or `SparseTensor` of type `float32`, `float64`, `int32`, or
- `int64`.
+ x: A `Tensor` or `SparseTensor` of type `float32`, `float64`, `int32`,
+ `int64`, `complex64` or `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` the same size and type as `x` with absolute
values.
+  Note, for `complex64` or `complex128` input, the returned `Tensor` will be
+ of type `float32` or `float64`, respectively.
"""
with ops.name_scope(name, "Abs", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
@@ -386,7 +392,7 @@ def sign(x, name=None):
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
@compatibility(numpy)
- Equivalent to numpy.sign except for the behaviour for input values of NaN.
+ Equivalent to numpy.sign except for the behavior for input values of NaN.
@end_compatibility
"""
with ops.name_scope(name, "Sign", [x]) as name:
@@ -1675,8 +1681,9 @@ def matmul(a,
name=None):
"""Multiplies matrix `a` by matrix `b`, producing `a` * `b`.
- The inputs must be matrices (or tensors of rank > 2, representing batches of
- matrices), with matching inner dimensions, possibly after transposition.
+ The inputs must, following any transpositions, be tensors of rank >= 2
+ where the inner 2 dimensions specify valid matrix multiplication arguments,
+ and any further outer dimensions match.
Both matrices must be of the same type. The supported types are:
`float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`.
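A small example of the clarified contract: the inner 2 dimensions multiply as matrices and the outer (batch) dimensions must match:

```
import tensorflow as tf

a = tf.ones([2, 3, 4])   # a batch of two 3x4 matrices
b = tf.ones([2, 4, 5])   # a batch of two 4x5 matrices
c = tf.matmul(a, b)      # inner dims multiply; outer dims match
print(c.shape)           # (2, 3, 5)
```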
diff --git a/tensorflow/python/ops/math_ops_test.py b/tensorflow/python/ops/math_ops_test.py
index a9089d461f..9683603785 100644
--- a/tensorflow/python/ops/math_ops_test.py
+++ b/tensorflow/python/ops/math_ops_test.py
@@ -424,9 +424,9 @@ class DivAndModTest(test_util.TensorFlowTestCase):
tf_divs = array_ops.constant(divs)
tf2_result = (tf_nums // tf_divs * tf_divs + tf_nums % tf_divs).eval()
np_result = (nums // divs) * divs + (nums % divs)
- # consistentcy with numpy
+ # Consistent with numpy
self.assertAllEqual(tf_result, np_result)
- # consistentcy with two forms of divide
+ # Consistent with two forms of divide
self.assertAllEqual(tf_result, tf2_result)
# consistency for truncation form
tf3_result = (math_ops.truncatediv(nums, divs) * divs +
diff --git a/tensorflow/python/ops/rnn_cell_impl.py b/tensorflow/python/ops/rnn_cell_impl.py
index 500e3b7859..49a4aba473 100644
--- a/tensorflow/python/ops/rnn_cell_impl.py
+++ b/tensorflow/python/ops/rnn_cell_impl.py
@@ -233,7 +233,7 @@ class BasicRNNCell(RNNCell):
"""The most basic RNN cell.
Args:
- num_units: int, The number of units in the LSTM cell.
+ num_units: int, The number of units in the RNN cell.
activation: Nonlinearity to use. Default: `tanh`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
diff --git a/tensorflow/python/ops/script_ops.py b/tensorflow/python/ops/script_ops.py
index ebe1f5c0a4..fe532fa186 100644
--- a/tensorflow/python/ops/script_ops.py
+++ b/tensorflow/python/ops/script_ops.py
@@ -13,7 +13,7 @@
# limitations under the License.
# ==============================================================================
-"""Script Language Operators. See the @{$python/script_ops} guide.
+"""Script Language Operators. See the @{python/script_ops} guide.
@@py_func
"""
diff --git a/tensorflow/python/ops/session_ops.py b/tensorflow/python/ops/session_ops.py
index e74c52b8cf..de43b562f9 100644
--- a/tensorflow/python/ops/session_ops.py
+++ b/tensorflow/python/ops/session_ops.py
@@ -13,7 +13,7 @@
# limitations under the License.
# ==============================================================================
-"""Tensor Handle Operations. See the @{$python/session_ops} guide.
+"""Tensor Handle Operations. See the @{python/session_ops} guide.
@@get_session_handle
@@get_session_handle_v2
diff --git a/tensorflow/python/ops/sparse_ops.py b/tensorflow/python/ops/sparse_ops.py
index b196ed05b7..7079922736 100644
--- a/tensorflow/python/ops/sparse_ops.py
+++ b/tensorflow/python/ops/sparse_ops.py
@@ -14,7 +14,7 @@
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
-"""Sparse Tensor Representation. See the @{$python/sparse_ops} guide.
+"""Sparse Tensor Representation. See the @{python/sparse_ops} guide.
@@SparseTensor
@@SparseTensorValue
@@ -1478,7 +1478,7 @@ def sparse_tensor_dense_matmul(sp_a,
`sp_a.dense_shape` takes on large values.
Below is a rough speed comparison between `sparse_tensor_dense_matmul`,
- labelled 'sparse', and `matmul`(a_is_sparse=True), labelled 'dense'. For
+ labeled 'sparse', and `matmul`(a_is_sparse=True), labeled 'dense'. For
purposes of the comparison, the time spent converting from a `SparseTensor` to
a dense `Tensor` is not included, so it is overly conservative with respect to
the time ratio.
diff --git a/tensorflow/python/ops/special_math_ops.py b/tensorflow/python/ops/special_math_ops.py
index 851fba0beb..b561203bb4 100644
--- a/tensorflow/python/ops/special_math_ops.py
+++ b/tensorflow/python/ops/special_math_ops.py
@@ -424,7 +424,7 @@ def _exponential_space_einsum(equation, *inputs):
missing_idx = set(idx_out).difference(idx_all)
if missing_idx:
raise ValueError(
- 'Unknown ouput axes: %s' % missing_idx
+ 'Unknown output axes: %s' % missing_idx
)
axis_order = {}
diff --git a/tensorflow/python/ops/state_ops.py b/tensorflow/python/ops/state_ops.py
index dbc637975d..63394d5214 100644
--- a/tensorflow/python/ops/state_ops.py
+++ b/tensorflow/python/ops/state_ops.py
@@ -13,7 +13,7 @@
# limitations under the License.
# ==============================================================================
-"""Variables. See the @{$python/state_ops} guide.
+"""Variables. See the @{python/state_ops} guide.
@@Variable
@@global_variables
diff --git a/tensorflow/python/ops/variable_scope.py b/tensorflow/python/ops/variable_scope.py
index a29ddfa9f2..aceffd373a 100644
--- a/tensorflow/python/ops/variable_scope.py
+++ b/tensorflow/python/ops/variable_scope.py
@@ -282,7 +282,7 @@ class _VariableStore(object):
# If a *_ref type is passed in an error would be triggered further down the
# stack. We prevent this using base_dtype to get a non-ref version of the
- # type, before doing anything else. When _ref types are removed in favour of
+ # type, before doing anything else. When _ref types are removed in favor of
# resources, this line can be removed.
try:
dtype = dtype.base_dtype
diff --git a/tensorflow/python/ops/variables.py b/tensorflow/python/ops/variables.py
index 1797460a6d..5968f2684b 100644
--- a/tensorflow/python/ops/variables.py
+++ b/tensorflow/python/ops/variables.py
@@ -1196,7 +1196,7 @@ def initialize_variables(var_list, name="init"):
def global_variables_initializer():
"""Returns an Op that initializes global variables.
- This is just a shortcut for `variable_initializer(global_variables())`
+ This is just a shortcut for `variables_initializer(global_variables())`
Returns:
An Op that initializes global variables in the graph.
@@ -1214,7 +1214,7 @@ def initialize_all_variables():
def local_variables_initializer():
"""Returns an Op that initializes all local variables.
- This is just a shortcut for `variable_initializer(local_variables())`
+ This is just a shortcut for `variables_initializer(local_variables())`
Returns:
An Op that initializes all local variables in the graph.