author    A. Unique TensorFlower <gardener@tensorflow.org>  2017-11-07 23:19:03 -0800
committer Andrew Selle <aselle@andyselle.com>  2017-11-10 16:14:35 -0800
commit    a9e3905de7d44d33efb056d292c9faa1006cb740 (patch)
tree      2fabaa10cdf0adffba3d05e169ad1b52162712cf
parent    ba46a05afa45293e20c305cafc466c5c8a29517c (diff)
Update ops-related pbtxt files.
PiperOrigin-RevId: 174964560
-rw-r--r-- tensorflow/core/ops/ops.pbtxt | 362
1 file changed, 362 insertions, 0 deletions
diff --git a/tensorflow/core/ops/ops.pbtxt b/tensorflow/core/ops/ops.pbtxt
index d35decc182..8353b45e22 100644
--- a/tensorflow/core/ops/ops.pbtxt
+++ b/tensorflow/core/ops/ops.pbtxt
@@ -6059,6 +6059,32 @@ op {
description: "By default, this op performs an inclusive cumsum, which means that the first\nelement of the input is identical to the first element of the output:\n\n```python\ntf.cumsum([a, b, c]) # => [a, a + b, a + b + c]\n```\n\nBy setting the `exclusive` kwarg to `True`, an exclusive cumsum is\nperformed instead:\n\n```python\ntf.cumsum([a, b, c], exclusive=True) # => [0, a, a + b]\n```\n\nBy setting the `reverse` kwarg to `True`, the cumsum is performed in the\nopposite direction:\n\n```python\ntf.cumsum([a, b, c], reverse=True) # => [a + b + c, b + c, c]\n```\n\nThis is more efficient than using separate `tf.reverse` ops.\n\nThe `reverse` and `exclusive` kwargs can also be combined:\n\n```python\ntf.cumsum([a, b, c], exclusive=True, reverse=True) # => [b + c, c, 0]\n```"
}
op {
+ name: "DatasetToSingleElement"
+ input_arg {
+ name: "dataset"
+ description: "A handle to a dataset that contains a single element."
+ type: DT_VARIANT
+ }
+ output_arg {
+ name: "components"
+ description: "The components of the single element of `input`."
+ type_list_attr: "output_types"
+ }
+ attr {
+ name: "output_types"
+ type: "list(type)"
+ has_minimum: true
+ minimum: 1
+ }
+ attr {
+ name: "output_shapes"
+ type: "list(shape)"
+ has_minimum: true
+ minimum: 1
+ }
+ summary: "Outputs the single element from the given dataset."
+}
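A minimal usage sketch for this op, assuming the contemporaneous `tf.contrib.data.get_single_element` wrapper that lowers to `DatasetToSingleElement` (the wrapper name is an assumption, not part of this diff):

```python
import tensorflow as tf

# A dataset known to contain exactly one element.
dataset = tf.data.Dataset.from_tensors([1, 2, 3])

# Assumed wrapper: returns the element's components directly as tensors,
# without constructing an iterator.
element = tf.contrib.data.get_single_element(dataset)

with tf.Session() as sess:
    print(sess.run(element))  # => [1 2 3]
```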
+op {
name: "DebugGradientIdentity"
input_arg {
name: "input"
@@ -6690,6 +6716,41 @@ op {
description: "See SetOperationOp::SetOperationFromContext for values of `set_operation`.\n\nOutput `result` is a `SparseTensor` represented by `result_indices`,\n`result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this\nhas rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`\ndimension contains the result of `set_operation` applied to the corresponding\n`[0...n-1]` dimension of `set`."
}
op {
+ name: "DenseToSparseBatchDataset"
+ input_arg {
+ name: "input_dataset"
+ description: "A handle to an input dataset. Must have a single component."
+ type: DT_VARIANT
+ }
+ input_arg {
+ name: "batch_size"
+ description: "A scalar representing the number of elements to accumulate in a\nbatch."
+ type: DT_INT64
+ }
+ input_arg {
+ name: "row_shape"
+ description: "A vector representing the dense shape of each row in the produced\nSparseTensor. The shape may be partially specified, using `-1` to indicate\nthat a particular dimension should use the maximum size of all batch elements."
+ type: DT_INT64
+ }
+ output_arg {
+ name: "handle"
+ type: DT_VARIANT
+ }
+ attr {
+ name: "output_types"
+ type: "list(type)"
+ has_minimum: true
+ minimum: 1
+ }
+ attr {
+ name: "output_shapes"
+ type: "list(shape)"
+ has_minimum: true
+ minimum: 1
+ }
+ summary: "Creates a dataset that yields a SparseTensor for each element of the input."
+}
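A hedged sketch of how this op is typically reached from user code, via the `tf.contrib.data.dense_to_sparse_batch` transformation (assumed wrapper, not shown in this diff):

```python
import tensorflow as tf

# Rows of varying length; row_shape=[-1] pads each batch dimension to the
# longest row in that batch, per the row_shape description above.
dataset = tf.data.Dataset.from_generator(
    lambda: iter([[1], [1, 2], [1, 2, 3]]),
    tf.int64, tf.TensorShape([None]))

batched = dataset.apply(
    tf.contrib.data.dense_to_sparse_batch(batch_size=3, row_shape=[-1]))

sparse_batch = batched.make_one_shot_iterator().get_next()
with tf.Session() as sess:
    # A SparseTensorValue with dense_shape [3, 3].
    print(sess.run(sparse_batch))
```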
+op {
name: "DenseToSparseSetOperation"
input_arg {
name: "set1"
@@ -7029,6 +7090,21 @@ op {
description: "[min_range, max_range] are scalar floats that specify the range for\nthe \'input\' data. The \'mode\' attribute controls exactly which calculations are\nused to convert the float values to their quantized equivalents.\n\nIn \'MIN_COMBINED\' mode, each value of the tensor will undergo the following:\n\n```\nif T == qint8, in[i] += (range(T) + 1)/ 2.0\nout[i] = min_range + (in[i]* (max_range - min_range) / range(T))\n```\nhere `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`\n\n*MIN_COMBINED Mode Example*\n\nIf the input comes from a QuantizedRelu6, the output type is\nquint8 (range of 0-255) but the possible range of QuantizedRelu6 is\n0-6. The min_range and max_range values are therefore 0.0 and 6.0.\nDequantize on quint8 will take each value, cast to float, and multiply\nby 6 / 255.\nNote that if quantizedtype is qint8, the operation will additionally add\neach value by 128 prior to casting.\n\nIf the mode is \'MIN_FIRST\', then this approach is used:\n\n```c++\nnum_discrete_values = 1 << (# of bits in T)\nrange_adjust = num_discrete_values / (num_discrete_values - 1)\nrange = (range_max - range_min) * range_adjust\nrange_scale = range / num_discrete_values\nconst double offset_input = static_cast<double>(input) - lowest_quantized;\nresult = range_min + ((input - numeric_limits<T>::min()) * range_scale)\n```\n\n*SCALED mode Example*\n\n`SCALED` mode matches the quantization approach used in\n`QuantizeAndDequantize{V2|V3}`.\n\nIf the mode is `SCALED`, we do not use the full range of the output type,\nchoosing to elide the lowest possible value for symmetry (e.g., output range is\n-127 to 127, not -128 to 127 for signed 8 bit quantization), so that 0.0 maps to\n0.\n\nWe first find the range of values in our tensor. The\nrange we use is always centered on 0, so we find m such that\n```c++\n m = max(abs(input_min), abs(input_max))\n```\n\nOur input tensor range is then `[-m, m]`.\n\nNext, we choose our fixed-point quantization buckets, `[min_fixed, max_fixed]`.\nIf T is signed, this is\n```\n num_bits = sizeof(T) * 8\n [min_fixed, max_fixed] =\n [-(1 << (num_bits - 1) - 1), (1 << (num_bits - 1)) - 1]\n```\n\nOtherwise, if T is unsigned, the fixed-point range is\n```\n [min_fixed, max_fixed] = [0, (1 << num_bits) - 1]\n```\n\nFrom this we compute our scaling factor, s:\n```c++\n s = (2 * m) / (max_fixed - min_fixed)\n```\n\nNow we can dequantize the elements of our tensor:\n```c++\nresult = input * s\n```"
}
op {
+ name: "DeserializeIterator"
+ input_arg {
+ name: "resource_handle"
+ description: "A handle to an iterator resource."
+ type: DT_RESOURCE
+ }
+ input_arg {
+ name: "serialized"
+ description: "A variant tensor storing the state of the iterator contained in the\nresource."
+ type: DT_VARIANT
+ }
+ summary: "Converts the given variant tensor to an iterator and stores it in the given resource."
+ is_stateful: true
+}
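Together with `SerializeIterator` below, this op backs iterator checkpointing. A sketch of the save side, assuming the `tf.contrib.data.make_saveable_from_iterator` helper from the same era (an assumption; this diff only defines the ops):

```python
import tensorflow as tf

dataset = tf.data.Dataset.range(100)
iterator = dataset.make_initializable_iterator()

# Assumed helper: wraps the iterator's resource handle in a SaveableObject
# whose save path runs SerializeIterator and whose restore path runs
# DeserializeIterator.
saveable = tf.contrib.data.make_saveable_from_iterator(iterator)
tf.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, saveable)
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(iterator.initializer)
    sess.run(iterator.get_next())           # consume one element
    saver.save(sess, "/tmp/iterator_ckpt")  # iterator state is checkpointed
```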
+op {
name: "DeserializeManySparse"
input_arg {
name: "serialized_sparse"
@@ -10143,6 +10219,71 @@ op {
description: "*NOTE*: `GreaterEqual` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)"
}
op {
+ name: "GroupByWindowDataset"
+ input_arg {
+ name: "input_dataset"
+ type: DT_VARIANT
+ }
+ input_arg {
+ name: "key_func_other_arguments"
+ type_list_attr: "Tkey_func_other_arguments"
+ }
+ input_arg {
+ name: "reduce_func_other_arguments"
+ type_list_attr: "Treduce_func_other_arguments"
+ }
+ input_arg {
+ name: "window_size_func_other_arguments"
+ type_list_attr: "Twindow_size_func_other_arguments"
+ }
+ output_arg {
+ name: "handle"
+ type: DT_VARIANT
+ }
+ attr {
+ name: "key_func"
+ type: "func"
+ description: "A function mapping an element of `input_dataset`, concatenated\nwith `key_func_other_arguments` to a scalar value of type DT_INT64."
+ }
+ attr {
+ name: "reduce_func"
+ type: "func"
+ }
+ attr {
+ name: "window_size_func"
+ type: "func"
+ }
+ attr {
+ name: "Tkey_func_other_arguments"
+ type: "list(type)"
+ has_minimum: true
+ }
+ attr {
+ name: "Treduce_func_other_arguments"
+ type: "list(type)"
+ has_minimum: true
+ }
+ attr {
+ name: "Twindow_size_func_other_arguments"
+ type: "list(type)"
+ has_minimum: true
+ }
+ attr {
+ name: "output_types"
+ type: "list(type)"
+ has_minimum: true
+ minimum: 1
+ }
+ attr {
+ name: "output_shapes"
+ type: "list(shape)"
+ has_minimum: true
+ minimum: 1
+ }
+ summary: "Creates a dataset that computes a windowed group-by on `input_dataset`."
+ description: "// TODO(mrry): Support non-int64 keys."
+}
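A hedged usage sketch via the `tf.contrib.data.group_by_window` transformation (assumed wrapper; per the TODO above, keys must currently be int64):

```python
import tensorflow as tf

dataset = tf.data.Dataset.range(10)

# Group elements by parity; each per-key window of 4 elements is reduced
# to a batch. key_func must return a scalar int64, per the op definition.
grouped = dataset.apply(tf.contrib.data.group_by_window(
    key_func=lambda x: x % 2,
    reduce_func=lambda key, window: window.batch(4),
    window_size=4))

batch = grouped.make_one_shot_iterator().get_next()
with tf.Session() as sess:
    print(sess.run(batch))  # e.g. [0 2 4 6]
    print(sess.run(batch))  # e.g. [1 3 5 7]
```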
+op {
name: "HSVToRGB"
input_arg {
name: "images"
@@ -10603,6 +10744,30 @@ op {
description: "The upper regularized incomplete Gamma function is defined as:\n\n\\\\(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\\\\)\n\nwhere\n\n\\\\(Gamma(a, x) = int_{x}^{\\infty} t^{a-1} exp(-t) dt\\\\)\n\nis the upper incomplete Gama function.\n\nNote, above `P(a, x)` (`Igamma`) is the lower regularized complete\nGamma function."
}
op {
+ name: "IgnoreErrorsDataset"
+ input_arg {
+ name: "input_dataset"
+ type: DT_VARIANT
+ }
+ output_arg {
+ name: "handle"
+ type: DT_VARIANT
+ }
+ attr {
+ name: "output_types"
+ type: "list(type)"
+ has_minimum: true
+ minimum: 1
+ }
+ attr {
+ name: "output_shapes"
+ type: "list(shape)"
+ has_minimum: true
+ minimum: 1
+ }
+ summary: "Creates a dataset that contains the elements of `input_dataset` ignoring errors."
+}
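A sketch of the intended use, assuming the `tf.contrib.data.ignore_errors` wrapper: elements whose processing raises an error are silently dropped rather than failing the run.

```python
import tensorflow as tf

dataset = tf.data.Dataset.from_tensor_slices([1., 2., 0., 4.])

# 1/0 produces Inf, which check_numerics turns into an InvalidArgumentError.
dataset = dataset.map(lambda x: tf.check_numerics(1. / x, "error"))

# Assumed wrapper lowering to IgnoreErrorsDataset: the bad element is skipped.
dataset = dataset.apply(tf.contrib.data.ignore_errors())

next_element = dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
    print(sess.run(next_element))  # 1.0
    print(sess.run(next_element))  # 0.5
    print(sess.run(next_element))  # 0.25 -- the 0. element was dropped
```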
+op {
name: "Imag"
input_arg {
name: "input"
@@ -12374,6 +12539,54 @@ op {
is_stateful: true
}
op {
+ name: "MapAndBatchDataset"
+ input_arg {
+ name: "input_dataset"
+ type: DT_VARIANT
+ }
+ input_arg {
+ name: "other_arguments"
+ type_list_attr: "Targuments"
+ }
+ input_arg {
+ name: "batch_size"
+ description: "A scalar representing the number of elements to accumulate in a\nbatch. It determines the number of concurrent invocations of `f` that process\nelements from `input_dataset` in parallel."
+ type: DT_INT64
+ }
+ input_arg {
+ name: "num_parallel_batches"
+ description: "A scalar representing the number of batches to create in\nparallel. Processing multiple batches in parallel benefits workloads prone to\nstragglers."
+ type: DT_INT64
+ }
+ output_arg {
+ name: "handle"
+ type: DT_VARIANT
+ }
+ attr {
+ name: "f"
+ type: "func"
+ }
+ attr {
+ name: "Targuments"
+ type: "list(type)"
+ has_minimum: true
+ }
+ attr {
+ name: "output_types"
+ type: "list(type)"
+ has_minimum: true
+ minimum: 1
+ }
+ attr {
+ name: "output_shapes"
+ type: "list(shape)"
+ has_minimum: true
+ minimum: 1
+ }
+ summary: "Creates a dataset that applies `f` to the outputs of `input_dataset` and then"
+ description: "batches `batch_size` of them.\n\nUnlike a \"MapDataset\", which applies `f` sequentially, this dataset invokes up\nto `batch_size * num_parallel_batches` copies of `f` in parallel."
+}
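A sketch of the fused map-and-batch path this op enables, assuming a `tf.contrib.data.map_and_batch` wrapper (an assumption; only the op is defined here):

```python
import tensorflow as tf

dataset = tf.data.Dataset.range(1000)

# Fuses dataset.map(f).batch(batch_size): per the description above, up to
# batch_size * num_parallel_batches invocations of f may run in parallel.
dataset = dataset.apply(tf.contrib.data.map_and_batch(
    map_func=lambda x: x * x,
    batch_size=32,
    num_parallel_batches=2))

batch = dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
    print(sess.run(batch)[:4])  # => [0 1 4 9]
```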
+op {
name: "MapClear"
attr {
name: "capacity"
@@ -16044,6 +16257,57 @@ op {
description: "Builds a merged tensor such that\n\n```python\n merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]\n```\n\nFor example, if each `indices[m]` is scalar or vector, we have\n\n```python\n # Scalar indices:\n merged[indices[m], ...] = data[m][...]\n\n # Vector indices:\n merged[indices[m][i], ...] = data[m][i, ...]\n```\n\nEach `data[i].shape` must start with the corresponding `indices[i].shape`,\nand the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we\nmust have `data[i].shape = indices[i].shape + constant`. In terms of this\n`constant`, the output shape is\n\n merged.shape = [max(indices)] + constant\n\nValues may be merged in parallel, so if an index appears in both `indices[m][i]`\nand `indices[n][j]`, the result may be invalid. This differs from the normal\nDynamicStitch operator that defines the behavior in that case.\n\nFor example:\n\n```python\n indices[0] = 6\n indices[1] = [4, 1]\n indices[2] = [[5, 2], [0, 3]]\n data[0] = [61, 62]\n data[1] = [[41, 42], [11, 12]]\n data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]\n merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],\n [51, 52], [61, 62]]\n```\n\nThis method can be used to merge partitions created by `dynamic_partition`\nas illustrated on the following example:\n\n```python\n # Apply function (increments x_i) on elements for which a certain condition\n # apply (x_i != -1 in this example).\n x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])\n condition_mask=tf.not_equal(x,tf.constant(-1.))\n partitioned_data = tf.dynamic_partition(\n x, tf.cast(condition_mask, tf.int32) , 2)\n partitioned_data[1] = partitioned_data[1] + 1.0\n condition_indices = tf.dynamic_partition(\n tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2)\n x = tf.dynamic_stitch(condition_indices, partitioned_data)\n # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain\n # unchanged.\n```\n\n<div style=\"width:70%; margin:auto; margin-bottom:10px; margin-top:20px;\">\n<img style=\"width:100%\" src=\"https://www.tensorflow.org/images/DynamicStitch.png\" alt>\n</div>"
}
op {
+ name: "ParallelInterleaveDataset"
+ input_arg {
+ name: "input_dataset"
+ type: DT_VARIANT
+ }
+ input_arg {
+ name: "other_arguments"
+ type_list_attr: "Targuments"
+ }
+ input_arg {
+ name: "cycle_length"
+ type: DT_INT64
+ }
+ input_arg {
+ name: "block_length"
+ type: DT_INT64
+ }
+ input_arg {
+ name: "sloppy"
+ type: DT_BOOL
+ }
+ output_arg {
+ name: "handle"
+ type: DT_VARIANT
+ }
+ attr {
+ name: "f"
+ type: "func"
+ description: "A function mapping elements of `input_dataset`, concatenated with\n`other_arguments`, to a Dataset variant that contains elements matching\n`output_types` and `output_shapes`."
+ }
+ attr {
+ name: "Targuments"
+ type: "list(type)"
+ has_minimum: true
+ }
+ attr {
+ name: "output_types"
+ type: "list(type)"
+ has_minimum: true
+ minimum: 1
+ }
+ attr {
+ name: "output_shapes"
+ type: "list(shape)"
+ has_minimum: true
+ minimum: 1
+ }
+ summary: "Creates a dataset that applies `f` to the outputs of `input_dataset`."
+ description: "The resulting dataset is similar to the `InterleaveDataset`, with the exception\nthat if retrieving the next value from a dataset would cause the requester to\nblock, it will skip that input dataset. This dataset is especially useful\nwhen loading data from a variable-latency datastores (e.g. HDFS, GCS), as it\nallows the training step to proceed so long as some data is available.\n\n!! WARNING !! This dataset is not deterministic!"
+}
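A sketch of the sharded-file pipeline this op targets, assuming a `tf.contrib.data.parallel_interleave` wrapper (the filenames are hypothetical):

```python
import tensorflow as tf

# Hypothetical shard files on a variable-latency store such as GCS.
filenames = tf.data.Dataset.from_tensor_slices(
    ["shard-0.txt", "shard-1.txt", "shard-2.txt", "shard-3.txt"])

# cycle_length readers are open at once and block_length consecutive
# elements are pulled from each. sloppy=True gives up determinism: a reader
# whose next element would block is skipped, as the warning above notes.
lines = filenames.apply(tf.contrib.data.parallel_interleave(
    lambda f: tf.data.TextLineDataset(f),
    cycle_length=4,
    block_length=16,
    sloppy=True))
```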
+op {
name: "ParallelMapDataset"
input_arg {
name: "input_dataset"
@@ -23851,6 +24115,53 @@ op {
description: "The input `tags` and `values` must have the same shape. The generated summary\nhas a summary value for each tag-value pair in `tags` and `values`."
}
op {
+ name: "ScanDataset"
+ input_arg {
+ name: "input_dataset"
+ type: DT_VARIANT
+ }
+ input_arg {
+ name: "initial_state"
+ type_list_attr: "Tstate"
+ }
+ input_arg {
+ name: "other_arguments"
+ type_list_attr: "Targuments"
+ }
+ output_arg {
+ name: "handle"
+ type: DT_VARIANT
+ }
+ attr {
+ name: "f"
+ type: "func"
+ }
+ attr {
+ name: "Tstate"
+ type: "list(type)"
+ has_minimum: true
+ minimum: 1
+ }
+ attr {
+ name: "Targuments"
+ type: "list(type)"
+ has_minimum: true
+ }
+ attr {
+ name: "output_types"
+ type: "list(type)"
+ has_minimum: true
+ minimum: 1
+ }
+ attr {
+ name: "output_shapes"
+ type: "list(shape)"
+ has_minimum: true
+ minimum: 1
+ }
+ summary: "Creates a dataset successively reduces `f` over the elements of `input_dataset`."
+}
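A sketch assuming a `tf.contrib.data.scan` wrapper: like a streaming `tf.scan`, `f` maps `(state, element)` to `(new_state, output)`.

```python
import tensorflow as tf

dataset = tf.data.Dataset.range(1, 6)  # 1, 2, 3, 4, 5

# Running product: the state threads through the dataset and is also emitted.
running_product = dataset.apply(tf.contrib.data.scan(
    initial_state=tf.constant(1, dtype=tf.int64),
    scan_func=lambda state, x: (state * x, state * x)))

next_element = running_product.make_one_shot_iterator().get_next()
with tf.Session() as sess:
    for _ in range(5):
        print(sess.run(next_element))  # 1, 2, 6, 24, 120
```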
+op {
name: "ScatterAdd"
input_arg {
name: "ref"
@@ -25045,6 +25356,21 @@ op {
summary: "Computes gradients for the scaled exponential linear (Selu) operation."
}
op {
+ name: "SerializeIterator"
+ input_arg {
+ name: "resource_handle"
+ description: "A handle to an iterator resource."
+ type: DT_RESOURCE
+ }
+ output_arg {
+ name: "serialized"
+ description: "A variant tensor storing the state of the iterator contained in the\nresource."
+ type: DT_VARIANT
+ }
+ summary: "Converts the given `resource_handle` representing an iterator to a variant tensor."
+ is_stateful: true
+}
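The restore half of the checkpointing sketch shown under `DeserializeIterator` above (same assumed `make_saveable_from_iterator` helper):

```python
import tensorflow as tf

dataset = tf.data.Dataset.range(100)
iterator = dataset.make_initializable_iterator()

# Restoring the checkpoint feeds the serialized variant back through
# DeserializeIterator, so iteration resumes where the save left off.
saveable = tf.contrib.data.make_saveable_from_iterator(iterator)
tf.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, saveable)
saver = tf.train.Saver()

with tf.Session() as sess:
    saver.restore(sess, "/tmp/iterator_ckpt")
    print(sess.run(iterator.get_next()))  # => 1, continuing after the save
```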
+op {
name: "SerializeManySparse"
input_arg {
name: "sparse_indices"
@@ -28955,6 +29281,42 @@ op {
summary: "Splits a tensor into `num_split` tensors along one dimension."
}
op {
+ name: "SqlDataset"
+ input_arg {
+ name: "driver_name"
+ description: "The database type. Currently, the only supported type is \'sqlite\'."
+ type: DT_STRING
+ }
+ input_arg {
+ name: "data_source_name"
+ description: "A connection string to connect to the database."
+ type: DT_STRING
+ }
+ input_arg {
+ name: "query"
+ description: "A SQL query to execute."
+ type: DT_STRING
+ }
+ output_arg {
+ name: "handle"
+ type: DT_VARIANT
+ }
+ attr {
+ name: "output_types"
+ type: "list(type)"
+ has_minimum: true
+ minimum: 1
+ }
+ attr {
+ name: "output_shapes"
+ type: "list(shape)"
+ has_minimum: true
+ minimum: 1
+ }
+ summary: "Creates a dataset that executes a SQL query and emits rows of the result set."
+ is_stateful: true
+}
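A sketch assuming the `tf.contrib.data.SqlDataset` wrapper; the database path, table, and columns are hypothetical.

```python
import tensorflow as tf

# Each row of the result set becomes one dataset element whose components
# must match output_types (one dtype per selected column).
dataset = tf.contrib.data.SqlDataset(
    driver_name="sqlite",              # only sqlite is supported, per above
    data_source_name="/tmp/users.db",  # hypothetical database file
    query="SELECT first_name, last_name FROM users",
    output_types=(tf.string, tf.string))

next_row = dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
    print(sess.run(next_row))  # e.g. (b'Ada', b'Lovelace')
```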
+op {
name: "Sqrt"
input_arg {
name: "x"