author    A. Unique TensorFlower <gardener@tensorflow.org>  2018-01-12 16:48:25 -0800
committer TensorFlower Gardener <gardener@tensorflow.org>   2018-01-12 16:52:32 -0800
commit    d326cf2019ba5b3a63393f8e809a5a5d41e6c8b0 (patch)
tree      efbc65f4ab94592a4a9c42a83fd4389bc5fb9a1c
parent    01380e3d2f344e67c1b01d1920fe599e86c64f08 (diff)
Go: Update generated wrapper functions for TensorFlow ops.
PiperOrigin-RevId: 181810382
-rw-r--r--  tensorflow/go/op/wrappers.go  34184
1 file changed, 17092 insertions(+), 17092 deletions(-)
diff --git a/tensorflow/go/op/wrappers.go b/tensorflow/go/op/wrappers.go
index 35e3389ddf..39aa39f1c8 100644
--- a/tensorflow/go/op/wrappers.go
+++ b/tensorflow/go/op/wrappers.go
@@ -38,184 +38,252 @@ func makeOutputList(op *tf.Operation, start int, output string) ([]tf.Output, in
return list, start + size, nil
}
-// Outputs a `tf.Event` protocol buffer.
-//
-// When CreateSummaryDbWriter is being used, this op can be useful for
-// importing data from event logs.
+// WriteImageSummaryAttr is an optional argument to WriteImageSummary.
+type WriteImageSummaryAttr func(optionalAttr)
+
+// WriteImageSummaryMaxImages sets the optional max_images attribute to value.
//
-// Arguments:
-// writer: A handle to a summary writer.
-// event: A string containing a binary-encoded tf.Event proto.
+// value: Max number of batch elements to generate images for.
+// If not specified, defaults to 3
//
-// Returns the created operation.
-func ImportEvent(scope *Scope, writer tf.Output, event tf.Output) (o *tf.Operation) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "ImportEvent",
- Input: []tf.Input{
- writer, event,
- },
+// REQUIRES: value >= 1
+func WriteImageSummaryMaxImages(value int64) WriteImageSummaryAttr {
+ return func(m optionalAttr) {
+ m["max_images"] = value
}
- return scope.AddOperation(opspec)
}
-// Outputs a `Summary` protocol buffer with a tensor.
+// Writes a `Summary` protocol buffer with images.
+//
+// The summary has up to `max_images` summary values containing images. The
+// images are built from `tensor` which must be 4-D with shape `[batch_size,
+// height, width, channels]` and where `channels` can be:
+//
+// * 1: `tensor` is interpreted as Grayscale.
+// * 3: `tensor` is interpreted as RGB.
+// * 4: `tensor` is interpreted as RGBA.
+//
+// The images have the same number of channels as the input tensor. For float
+// input, the values are normalized one image at a time to fit in the range
+// `[0, 255]`. `uint8` values are unchanged. The op uses two different
+// normalization algorithms:
+//
+// * If the input values are all positive, they are rescaled so the largest one
+// is 255.
+//
+// * If any input value is negative, the values are shifted so input value 0.0
+// is at 127. They are then rescaled so that either the smallest value is 0,
+// or the largest one is 255.
+//
+// The `tag` argument is a scalar `Tensor` of type `string`. It is used to
+// build the `tag` of the summary values:
+//
+// * If `max_images` is 1, the summary value tag is '*tag*/image'.
+// * If `max_images` is greater than 1, the summary value tags are
+// generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.
+//
+// The `bad_color` argument is the color to use in the generated images for
+// non-finite input values. It is a `uint8` 1-D tensor of length `channels`.
+// Each element must be in the range `[0, 255]` (it represents the value of a
+// pixel in the output image). Non-finite values in the input tensor are
+// replaced by this tensor in the output image. The default value is the color
+// red.
//
// Arguments:
// writer: A handle to a summary writer.
// step: The step to write the summary for.
-// tensor: A tensor to serialize.
-// tag: The summary's tag.
-// summary_metadata: Serialized SummaryMetadata protocol buffer containing
-// plugin-related metadata for this summary.
+// tag: Scalar. Used to build the `tag` attribute of the summary values.
+// tensor: 4-D of shape `[batch_size, height, width, channels]` where
+// `channels` is 1, 3, or 4.
+// bad_color: Color to use for pixels with non-finite values.
//
// Returns the created operation.
-func WriteSummary(scope *Scope, writer tf.Output, step tf.Output, tensor tf.Output, tag tf.Output, summary_metadata tf.Output) (o *tf.Operation) {
+func WriteImageSummary(scope *Scope, writer tf.Output, step tf.Output, tag tf.Output, tensor tf.Output, bad_color tf.Output, optional ...WriteImageSummaryAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "WriteSummary",
+ Type: "WriteImageSummary",
Input: []tf.Input{
- writer, step, tensor, tag, summary_metadata,
+ writer, step, tag, tensor, bad_color,
},
+ Attrs: attrs,
}
return scope.AddOperation(opspec)
}
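// A minimal graph-construction sketch for the wrapper above. It assumes the
// generated op.SummaryWriter and op.Placeholder wrappers from elsewhere in
// this package; all names are illustrative:
//
// ```go
// s := op.NewScope()
// writer := op.SummaryWriter(s)                    // resource handle to a writer
// step := op.Const(s, int64(1))
// tag := op.Const(s, "training/images")
// images := op.Placeholder(s, tf.Float)            // 4-D [batch, h, w, channels]
// badColor := op.Const(s, []uint8{255, 0, 0, 255}) // red for non-finite pixels
// writeOp := op.WriteImageSummary(s, writer, step, tag, images, badColor,
// 	op.WriteImageSummaryMaxImages(5))
// _ = writeOp // run as a target in a session to emit the summary
// ```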
-// Flushes and closes the summary writer.
+// Partitions `data` into `num_partitions` tensors using indices from `partitions`.
//
-// Also removes it from the resource manager. To reopen, use another
-// CreateSummaryFileWriter op.
+// For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]`
+// becomes part of `outputs[partitions[js]]`. The slices with `partitions[js] = i`
+// are placed in `outputs[i]` in lexicographic order of `js`, and the first
+// dimension of `outputs[i]` is the number of entries in `partitions` equal to `i`.
+// In detail,
+//
+// ```python
+// outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]
+//
+// outputs[i] = pack([data[js, ...] for js if partitions[js] == i])
+// ```
+//
+// `data.shape` must start with `partitions.shape`.
+//
+// For example:
+//
+// ```python
+// # Scalar partitions.
+// partitions = 1
+// num_partitions = 2
+// data = [10, 20]
+// outputs[0] = [] # Empty with shape [0, 2]
+// outputs[1] = [[10, 20]]
+//
+// # Vector partitions.
+// partitions = [0, 0, 1, 1, 0]
+// num_partitions = 2
+// data = [10, 20, 30, 40, 50]
+// outputs[0] = [10, 20, 50]
+// outputs[1] = [30, 40]
+// ```
+//
+// See `dynamic_stitch` for an example on how to merge partitions back.
+//
+// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+// <img style="width:100%" src="https://www.tensorflow.org/images/DynamicPartition.png" alt>
+// </div>
//
// Arguments:
-// writer: A handle to the summary writer resource.
//
-// Returns the created operation.
-func CloseSummaryWriter(scope *Scope, writer tf.Output) (o *tf.Operation) {
+// partitions: Any shape. Indices in the range `[0, num_partitions)`.
+// num_partitions: The number of partitions to output.
+func DynamicPartition(scope *Scope, data tf.Output, partitions tf.Output, num_partitions int64) (outputs []tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"num_partitions": num_partitions}
opspec := tf.OpSpec{
- Type: "CloseSummaryWriter",
+ Type: "DynamicPartition",
Input: []tf.Input{
- writer,
+ data, partitions,
},
+ Attrs: attrs,
}
- return scope.AddOperation(opspec)
-}
-
-// Flushes the writer's unwritten events.
-//
-// Arguments:
-// writer: A handle to the summary writer resource.
-//
-// Returns the created operation.
-func FlushSummaryWriter(scope *Scope, writer tf.Output) (o *tf.Operation) {
+ op := scope.AddOperation(opspec)
if scope.Err() != nil {
return
}
- opspec := tf.OpSpec{
- Type: "FlushSummaryWriter",
- Input: []tf.Input{
- writer,
- },
+ var idx int
+ var err error
+ if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
+ scope.UpdateErr("DynamicPartition", err)
+ return
}
- return scope.AddOperation(opspec)
+ return outputs
}
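// A usage sketch mirroring the vector example in the doc comment above
// (illustration only):
//
// ```go
// s := op.NewScope()
// data := op.Const(s, []int32{10, 20, 30, 40, 50})
// partitions := op.Const(s, []int32{0, 0, 1, 1, 0})
// outputs := op.DynamicPartition(s, data, partitions, 2)
// // After running: outputs[0] -> [10 20 50], outputs[1] -> [30 40]
// _ = outputs
// ```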
-// FakeQuantWithMinMaxVarsPerChannelGradientAttr is an optional argument to FakeQuantWithMinMaxVarsPerChannelGradient.
-type FakeQuantWithMinMaxVarsPerChannelGradientAttr func(optionalAttr)
+// MutableHashTableOfTensorsV2Attr is an optional argument to MutableHashTableOfTensorsV2.
+type MutableHashTableOfTensorsV2Attr func(optionalAttr)
-// FakeQuantWithMinMaxVarsPerChannelGradientNumBits sets the optional num_bits attribute to value.
+// MutableHashTableOfTensorsV2Container sets the optional container attribute to value.
//
-// value: The bitwidth of the quantization; between 2 and 8, inclusive.
-// If not specified, defaults to 8
-func FakeQuantWithMinMaxVarsPerChannelGradientNumBits(value int64) FakeQuantWithMinMaxVarsPerChannelGradientAttr {
+// value: If non-empty, this table is placed in the given container.
+// Otherwise, a default container is used.
+// If not specified, defaults to ""
+func MutableHashTableOfTensorsV2Container(value string) MutableHashTableOfTensorsV2Attr {
return func(m optionalAttr) {
- m["num_bits"] = value
+ m["container"] = value
}
}
-// FakeQuantWithMinMaxVarsPerChannelGradientNarrowRange sets the optional narrow_range attribute to value.
+// MutableHashTableOfTensorsV2SharedName sets the optional shared_name attribute to value.
//
-// value: Whether to quantize into 2^num_bits - 1 distinct values.
+// value: If non-empty, this table is shared under the given name across
+// multiple sessions.
+// If not specified, defaults to ""
+func MutableHashTableOfTensorsV2SharedName(value string) MutableHashTableOfTensorsV2Attr {
+ return func(m optionalAttr) {
+ m["shared_name"] = value
+ }
+}
+
+// MutableHashTableOfTensorsV2UseNodeNameSharing sets the optional use_node_name_sharing attribute to value.
// If not specified, defaults to false
-func FakeQuantWithMinMaxVarsPerChannelGradientNarrowRange(value bool) FakeQuantWithMinMaxVarsPerChannelGradientAttr {
+func MutableHashTableOfTensorsV2UseNodeNameSharing(value bool) MutableHashTableOfTensorsV2Attr {
return func(m optionalAttr) {
- m["narrow_range"] = value
+ m["use_node_name_sharing"] = value
}
}
-// Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation.
-//
-// Arguments:
-// gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation,
-// shape one of: `[d]`, `[b, d]`, `[b, h, w, d]`.
-// inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape
-// same as `gradients`.
-// min, max: Quantization interval, floats of shape `[d]`.
+// MutableHashTableOfTensorsV2ValueShape sets the optional value_shape attribute to value.
+// If not specified, defaults to <>
+func MutableHashTableOfTensorsV2ValueShape(value tf.Shape) MutableHashTableOfTensorsV2Attr {
+ return func(m optionalAttr) {
+ m["value_shape"] = value
+ }
+}
+
+// Creates an empty hash table.
//
+// This op creates a mutable hash table, specifying the type of its keys and
+// values. Each value must be a vector. Data can be inserted into the table using
+// the insert operations. It does not support the initialization operation.
//
+// Arguments:
+// key_dtype: Type of the table keys.
+// value_dtype: Type of the table values.
//
-// Returns Backpropagated gradients w.r.t. inputs, shape same as
-// `inputs`:
-// `gradients * (inputs >= min && inputs <= max)`.Backpropagated gradients w.r.t. min parameter, shape `[d]`:
-// `sum_per_d(gradients * (inputs < min))`.Backpropagated gradients w.r.t. max parameter, shape `[d]`:
-// `sum_per_d(gradients * (inputs > max))`.
-func FakeQuantWithMinMaxVarsPerChannelGradient(scope *Scope, gradients tf.Output, inputs tf.Output, min tf.Output, max tf.Output, optional ...FakeQuantWithMinMaxVarsPerChannelGradientAttr) (backprops_wrt_input tf.Output, backprop_wrt_min tf.Output, backprop_wrt_max tf.Output) {
+// Returns Handle to a table.
+func MutableHashTableOfTensorsV2(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType, optional ...MutableHashTableOfTensorsV2Attr) (table_handle tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"key_dtype": key_dtype, "value_dtype": value_dtype}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "FakeQuantWithMinMaxVarsPerChannelGradient",
- Input: []tf.Input{
- gradients, inputs, min, max,
- },
+ Type: "MutableHashTableOfTensorsV2",
+
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
+ return op.Output(0)
}
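// A sketch of creating and populating such a table. It assumes the generated
// op.LookupTableInsertV2 wrapper from elsewhere in this file:
//
// ```go
// s := op.NewScope()
// table := op.MutableHashTableOfTensorsV2(s, tf.String, tf.Float,
// 	op.MutableHashTableOfTensorsV2SharedName("embeddings"))
// keys := op.Const(s, []string{"a", "b"})
// values := op.Const(s, [][]float32{{1, 2}, {3, 4}}) // each value is a vector
// insert := op.LookupTableInsertV2(s, table, keys, values)
// _ = insert // run as a target to populate the table
// ```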
-// FakeQuantWithMinMaxVarsAttr is an optional argument to FakeQuantWithMinMaxVars.
-type FakeQuantWithMinMaxVarsAttr func(optionalAttr)
-
-// FakeQuantWithMinMaxVarsNumBits sets the optional num_bits attribute to value.
-// If not specified, defaults to 8
-func FakeQuantWithMinMaxVarsNumBits(value int64) FakeQuantWithMinMaxVarsAttr {
- return func(m optionalAttr) {
- m["num_bits"] = value
- }
-}
+// ResourceApplyProximalAdagradAttr is an optional argument to ResourceApplyProximalAdagrad.
+type ResourceApplyProximalAdagradAttr func(optionalAttr)
-// FakeQuantWithMinMaxVarsNarrowRange sets the optional narrow_range attribute to value.
+// ResourceApplyProximalAdagradUseLocking sets the optional use_locking attribute to value.
+//
+// value: If True, updating of the var and accum tensors will be protected by
+// a lock; otherwise the behavior is undefined, but may exhibit less contention.
// If not specified, defaults to false
-func FakeQuantWithMinMaxVarsNarrowRange(value bool) FakeQuantWithMinMaxVarsAttr {
+func ResourceApplyProximalAdagradUseLocking(value bool) ResourceApplyProximalAdagradAttr {
return func(m optionalAttr) {
- m["narrow_range"] = value
+ m["use_locking"] = value
}
}
-// Fake-quantize the 'inputs' tensor of type float via global float scalars `min`
+// Update '*var' and '*accum' according to FOBOS with Adagrad learning rate.
//
-// and `max` to 'outputs' tensor of same shape as `inputs`.
+// accum += grad * grad
+// prox_v = var - lr * grad * (1 / sqrt(accum))
+// var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
//
-// `[min; max]` define the clamping range for the `inputs` data.
-// `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`
-// when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and
-// then de-quantized and output as floats in `[min; max]` interval.
-// `num_bits` is the bitwidth of the quantization; between 2 and 8, inclusive.
+// Arguments:
+// var_: Should be from a Variable().
+// accum: Should be from a Variable().
+// lr: Scaling factor. Must be a scalar.
+// l1: L1 regularization. Must be a scalar.
+// l2: L2 regularization. Must be a scalar.
+// grad: The gradient.
//
-// This operation has a gradient and thus allows for training `min` and `max`
-// values.
-func FakeQuantWithMinMaxVars(scope *Scope, inputs tf.Output, min tf.Output, max tf.Output, optional ...FakeQuantWithMinMaxVarsAttr) (outputs tf.Output) {
+// Returns the created operation.
+func ResourceApplyProximalAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, grad tf.Output, optional ...ResourceApplyProximalAdagradAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
@@ -224,708 +292,383 @@ func FakeQuantWithMinMaxVars(scope *Scope, inputs tf.Output, min tf.Output, max
a(attrs)
}
opspec := tf.OpSpec{
- Type: "FakeQuantWithMinMaxVars",
+ Type: "ResourceApplyProximalAdagrad",
Input: []tf.Input{
- inputs, min, max,
+ var_, accum, lr, l1, l2, grad,
},
Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
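// A scalar walk-through of the update equations above in plain Go
// (illustration only; the op itself updates resource variables in place):
//
// ```go
// import "math"
//
// // One FOBOS-with-Adagrad step for a single weight.
// func proximalAdagradStep(v, accum, lr, l1, l2, grad float64) (float64, float64) {
// 	accum += grad * grad
// 	proxV := v - lr*grad/math.Sqrt(accum)
// 	shrunk := math.Max(math.Abs(proxV)-lr*l1, 0) / (1 + lr*l2)
// 	return math.Copysign(shrunk, proxV), accum // new var, new accum
// }
// ```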
-// QuantizedInstanceNormAttr is an optional argument to QuantizedInstanceNorm.
-type QuantizedInstanceNormAttr func(optionalAttr)
-
-// QuantizedInstanceNormOutputRangeGiven sets the optional output_range_given attribute to value.
-//
-// value: If True, `given_y_min` and `given_y_min`
-// and `given_y_max` are used as the output range. Otherwise,
-// the implementation computes the output range.
-// If not specified, defaults to false
-func QuantizedInstanceNormOutputRangeGiven(value bool) QuantizedInstanceNormAttr {
- return func(m optionalAttr) {
- m["output_range_given"] = value
- }
-}
+// MutableHashTableV2Attr is an optional argument to MutableHashTableV2.
+type MutableHashTableV2Attr func(optionalAttr)
-// QuantizedInstanceNormGivenYMin sets the optional given_y_min attribute to value.
+// MutableHashTableV2Container sets the optional container attribute to value.
//
-// value: Output in `y_min` if `output_range_given` is True.
-// If not specified, defaults to 0
-func QuantizedInstanceNormGivenYMin(value float32) QuantizedInstanceNormAttr {
+// value: If non-empty, this table is placed in the given container.
+// Otherwise, a default container is used.
+// If not specified, defaults to ""
+func MutableHashTableV2Container(value string) MutableHashTableV2Attr {
return func(m optionalAttr) {
- m["given_y_min"] = value
+ m["container"] = value
}
}
-// QuantizedInstanceNormGivenYMax sets the optional given_y_max attribute to value.
+// MutableHashTableV2SharedName sets the optional shared_name attribute to value.
//
-// value: Output in `y_max` if `output_range_given` is True.
-// If not specified, defaults to 0
-func QuantizedInstanceNormGivenYMax(value float32) QuantizedInstanceNormAttr {
+// value: If non-empty, this table is shared under the given name across
+// multiple sessions.
+// If not specified, defaults to ""
+func MutableHashTableV2SharedName(value string) MutableHashTableV2Attr {
return func(m optionalAttr) {
- m["given_y_max"] = value
+ m["shared_name"] = value
}
}
-// QuantizedInstanceNormVarianceEpsilon sets the optional variance_epsilon attribute to value.
+// MutableHashTableV2UseNodeNameSharing sets the optional use_node_name_sharing attribute to value.
//
-// value: A small float number to avoid dividing by 0.
-// If not specified, defaults to 1e-05
-func QuantizedInstanceNormVarianceEpsilon(value float32) QuantizedInstanceNormAttr {
+// value: If true and shared_name is empty, the table is shared
+// using the node name.
+// If not specified, defaults to false
+func MutableHashTableV2UseNodeNameSharing(value bool) MutableHashTableV2Attr {
return func(m optionalAttr) {
- m["variance_epsilon"] = value
+ m["use_node_name_sharing"] = value
}
}
-// QuantizedInstanceNormMinSeparation sets the optional min_separation attribute to value.
+// Creates an empty hash table.
//
-// value: Minimum value of `y_max - y_min`
-// If not specified, defaults to 0.001
-func QuantizedInstanceNormMinSeparation(value float32) QuantizedInstanceNormAttr {
- return func(m optionalAttr) {
- m["min_separation"] = value
- }
-}
-
-// Quantized Instance normalization.
+// This op creates a mutable hash table, specifying the type of its keys and
+// values. Each value must be a scalar. Data can be inserted into the table using
+// the insert operations. It does not support the initialization operation.
//
// Arguments:
-// x: A 4D input Tensor.
-// x_min: The value represented by the lowest quantized input.
-// x_max: The value represented by the highest quantized input.
+// key_dtype: Type of the table keys.
+// value_dtype: Type of the table values.
//
-// Returns A 4D Tensor.The value represented by the lowest quantized output.The value represented by the highest quantized output.
-func QuantizedInstanceNorm(scope *Scope, x tf.Output, x_min tf.Output, x_max tf.Output, optional ...QuantizedInstanceNormAttr) (y tf.Output, y_min tf.Output, y_max tf.Output) {
+// Returns Handle to a table.
+func MutableHashTableV2(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType, optional ...MutableHashTableV2Attr) (table_handle tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"key_dtype": key_dtype, "value_dtype": value_dtype}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "QuantizedInstanceNorm",
- Input: []tf.Input{
- x, x_min, x_max,
- },
+ Type: "MutableHashTableV2",
+
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
+ return op.Output(0)
}
-// QuantizeAndDequantizeAttr is an optional argument to QuantizeAndDequantize.
-type QuantizeAndDequantizeAttr func(optionalAttr)
-
-// QuantizeAndDequantizeSignedInput sets the optional signed_input attribute to value.
-// If not specified, defaults to true
-func QuantizeAndDequantizeSignedInput(value bool) QuantizeAndDequantizeAttr {
- return func(m optionalAttr) {
- m["signed_input"] = value
- }
-}
+// MapUnstageNoKeyAttr is an optional argument to MapUnstageNoKey.
+type MapUnstageNoKeyAttr func(optionalAttr)
-// QuantizeAndDequantizeNumBits sets the optional num_bits attribute to value.
-// If not specified, defaults to 8
-func QuantizeAndDequantizeNumBits(value int64) QuantizeAndDequantizeAttr {
+// MapUnstageNoKeyCapacity sets the optional capacity attribute to value.
+// If not specified, defaults to 0
+//
+// REQUIRES: value >= 0
+func MapUnstageNoKeyCapacity(value int64) MapUnstageNoKeyAttr {
return func(m optionalAttr) {
- m["num_bits"] = value
+ m["capacity"] = value
}
}
-// QuantizeAndDequantizeRangeGiven sets the optional range_given attribute to value.
-// If not specified, defaults to false
-func QuantizeAndDequantizeRangeGiven(value bool) QuantizeAndDequantizeAttr {
+// MapUnstageNoKeyMemoryLimit sets the optional memory_limit attribute to value.
+// If not specified, defaults to 0
+//
+// REQUIRES: value >= 0
+func MapUnstageNoKeyMemoryLimit(value int64) MapUnstageNoKeyAttr {
return func(m optionalAttr) {
- m["range_given"] = value
+ m["memory_limit"] = value
}
}
-// QuantizeAndDequantizeInputMin sets the optional input_min attribute to value.
-// If not specified, defaults to 0
-func QuantizeAndDequantizeInputMin(value float32) QuantizeAndDequantizeAttr {
+// MapUnstageNoKeyContainer sets the optional container attribute to value.
+// If not specified, defaults to ""
+func MapUnstageNoKeyContainer(value string) MapUnstageNoKeyAttr {
return func(m optionalAttr) {
- m["input_min"] = value
+ m["container"] = value
}
}
-// QuantizeAndDequantizeInputMax sets the optional input_max attribute to value.
-// If not specified, defaults to 0
-func QuantizeAndDequantizeInputMax(value float32) QuantizeAndDequantizeAttr {
+// MapUnstageNoKeySharedName sets the optional shared_name attribute to value.
+// If not specified, defaults to ""
+func MapUnstageNoKeySharedName(value string) MapUnstageNoKeyAttr {
return func(m optionalAttr) {
- m["input_max"] = value
+ m["shared_name"] = value
}
}
-// Use QuantizeAndDequantizeV2 instead.
+// Op removes and returns a random (key, value)
//
-// DEPRECATED at GraphDef version 22: Replaced by QuantizeAndDequantizeV2
-func QuantizeAndDequantize(scope *Scope, input tf.Output, optional ...QuantizeAndDequantizeAttr) (output tf.Output) {
+// from the underlying container. If the underlying container
+// does not contain elements, the op will block until it does.
+func MapUnstageNoKey(scope *Scope, indices tf.Output, dtypes []tf.DataType, optional ...MapUnstageNoKeyAttr) (key tf.Output, values []tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"dtypes": dtypes}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "QuantizeAndDequantize",
+ Type: "MapUnstageNoKey",
Input: []tf.Input{
- input,
+ indices,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ if scope.Err() != nil {
+ return
+ }
+ var idx int
+ var err error
+ key = op.Output(idx)
+ if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
+ scope.UpdateErr("MapUnstageNoKey", err)
+ return
+ }
+ return key, values
}
-// OneHotAttr is an optional argument to OneHot.
-type OneHotAttr func(optionalAttr)
+// HashTableV2Attr is an optional argument to HashTableV2.
+type HashTableV2Attr func(optionalAttr)
-// OneHotAxis sets the optional axis attribute to value.
+// HashTableV2Container sets the optional container attribute to value.
//
-// value: The axis to fill (default: -1, a new inner-most axis).
-// If not specified, defaults to -1
-func OneHotAxis(value int64) OneHotAttr {
+// value: If non-empty, this table is placed in the given container.
+// Otherwise, a default container is used.
+// If not specified, defaults to ""
+func HashTableV2Container(value string) HashTableV2Attr {
return func(m optionalAttr) {
- m["axis"] = value
+ m["container"] = value
}
}
-// Returns a one-hot tensor.
-//
-// The locations represented by indices in `indices` take value `on_value`,
-// while all other locations take value `off_value`.
-//
-// If the input `indices` is rank `N`, the output will have rank `N+1`,
-// The new axis is created at dimension `axis` (default: the new axis is
-// appended at the end).
-//
-// If `indices` is a scalar the output shape will be a vector of length `depth`.
-//
-// If `indices` is a vector of length `features`, the output shape will be:
-// ```
-// features x depth if axis == -1
-// depth x features if axis == 0
-// ```
-//
-// If `indices` is a matrix (batch) with shape `[batch, features]`,
-// the output shape will be:
-// ```
-// batch x features x depth if axis == -1
-// batch x depth x features if axis == 1
-// depth x batch x features if axis == 0
-// ```
-//
-//
-// Examples
-// =========
-//
-// Suppose that
-//
-// ```
-// indices = [0, 2, -1, 1]
-// depth = 3
-// on_value = 5.0
-// off_value = 0.0
-// axis = -1
-// ```
-//
-// Then output is `[4 x 3]`:
-//
-// ```output =
-// [5.0 0.0 0.0] // one_hot(0)
-// [0.0 0.0 5.0] // one_hot(2)
-// [0.0 0.0 0.0] // one_hot(-1)
-// [0.0 5.0 0.0] // one_hot(1)
-// ```
-//
-// Suppose that
-//
-// ```
-// indices = [0, 2, -1, 1]
-// depth = 3
-// on_value = 0.0
-// off_value = 3.0
-// axis = 0
-// ```
-//
-// Then output is `[3 x 4]`:
-//
-// ```output =
-// [0.0 3.0 3.0 3.0]
-// [3.0 3.0 3.0 0.0]
-// [3.0 3.0 3.0 3.0]
-// [3.0 0.0 3.0 3.0]
-// // ^ one_hot(0)
-// // ^ one_hot(2)
-// // ^ one_hot(-1)
-// // ^ one_hot(1)
-// ```
-// Suppose that
+// HashTableV2SharedName sets the optional shared_name attribute to value.
//
-// ```
-// indices = [[0, 2], [1, -1]]
-// depth = 3
-// on_value = 1.0
-// off_value = 0.0
-// axis = -1
-// ```
+// value: If non-empty, this table is shared under the given name across
+// multiple sessions.
+// If not specified, defaults to ""
+func HashTableV2SharedName(value string) HashTableV2Attr {
+ return func(m optionalAttr) {
+ m["shared_name"] = value
+ }
+}
+
+// HashTableV2UseNodeNameSharing sets the optional use_node_name_sharing attribute to value.
//
-// Then output is `[2 x 2 x 3]`:
+// value: If true and shared_name is empty, the table is shared
+// using the node name.
+// If not specified, defaults to false
+func HashTableV2UseNodeNameSharing(value bool) HashTableV2Attr {
+ return func(m optionalAttr) {
+ m["use_node_name_sharing"] = value
+ }
+}
+
+// Creates a non-initialized hash table.
//
-// ```output =
-// [
-// [1.0, 0.0, 0.0] // one_hot(0)
-// [0.0, 0.0, 1.0] // one_hot(2)
-// ][
-// [0.0, 1.0, 0.0] // one_hot(1)
-// [0.0, 0.0, 0.0] // one_hot(-1)
-// ]```
+// This op creates a hash table, specifying the type of its keys and values.
+// Before using the table you will have to initialize it. After initialization the
+// table will be immutable.
//
// Arguments:
-// indices: A tensor of indices.
-// depth: A scalar defining the depth of the one hot dimension.
-// on_value: A scalar defining the value to fill in output when `indices[j] = i`.
-// off_value: A scalar defining the value to fill in output when `indices[j] != i`.
+// key_dtype: Type of the table keys.
+// value_dtype: Type of the table values.
//
-// Returns The one-hot tensor.
-func OneHot(scope *Scope, indices tf.Output, depth tf.Output, on_value tf.Output, off_value tf.Output, optional ...OneHotAttr) (output tf.Output) {
+// Returns Handle to a table.
+func HashTableV2(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType, optional ...HashTableV2Attr) (table_handle tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"key_dtype": key_dtype, "value_dtype": value_dtype}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "OneHot",
- Input: []tf.Input{
- indices, depth, on_value, off_value,
- },
+ Type: "HashTableV2",
+
Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Bitcasts a tensor from one type to another without copying data.
-//
-// Given a tensor `input`, this operation returns a tensor that has the same buffer
-// data as `input` with datatype `type`.
+// Replaces the contents of the table with the specified keys and values.
//
-// If the input datatype `T` is larger than the output datatype `type` then the
-// shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)].
+// The tensor `keys` must be of the same type as the keys of the table.
+// The tensor `values` must be of the type of the table values.
//
-// If `T` is smaller than `type`, the operator requires that the rightmost
-// dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from
-// [..., sizeof(`type`)/sizeof(`T`)] to [...].
+// Arguments:
+// table_handle: Handle to the table.
+// keys: Any shape. Keys to look up.
+// values: Values to associate with keys.
//
-// *NOTE*: Bitcast is implemented as a low-level cast, so machines with different
-// endian orderings will give different results.
-func Bitcast(scope *Scope, input tf.Output, type_ tf.DataType) (output tf.Output) {
+// Returns the created operation.
+func LookupTableImportV2(scope *Scope, table_handle tf.Output, keys tf.Output, values tf.Output) (o *tf.Operation) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"type": type_}
opspec := tf.OpSpec{
- Type: "Bitcast",
+ Type: "LookupTableImportV2",
Input: []tf.Input{
- input,
+ table_handle, keys, values,
},
- Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
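// A sketch combining this op with the MutableHashTableV2 wrapper above
// (illustration only; lookups would use the generated LookupTableFindV2
// wrapper, not shown here):
//
// ```go
// s := op.NewScope()
// table := op.MutableHashTableV2(s, tf.String, tf.Int64)
// keys := op.Const(s, []string{"apple", "banana"})
// values := op.Const(s, []int64{0, 1})
// importOp := op.LookupTableImportV2(s, table, keys, values)
// _ = importOp // run once as a target to (re)load the table contents
// ```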
-// Extract `patches` from `images` and put them in the "depth" output dimension.
-//
-// Arguments:
-// images: 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.
-// ksizes: The size of the sliding window for each dimension of `images`.
-// strides: 1-D of length 4. How far the centers of two consecutive patches are in
-// the images. Must be: `[1, stride_rows, stride_cols, 1]`.
-// rates: 1-D of length 4. Must be: `[1, rate_rows, rate_cols, 1]`. This is the
-// input stride, specifying how far two consecutive patch samples are in the
-// input. Equivalent to extracting patches with
-// `patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by
-// subsampling them spatially by a factor of `rates`. This is equivalent to
-// `rate` in dilated (a.k.a. Atrous) convolutions.
-// padding: The type of padding algorithm to use.
-//
-// We specify the size-related attributes as:
-//
-// ```python
-// ksizes = [1, ksize_rows, ksize_cols, 1]
-// strides = [1, strides_rows, strides_cols, 1]
-// rates = [1, rates_rows, rates_cols, 1]
-// ```
+// Returns (x - y)(x - y) element-wise.
//
-// Returns 4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows *
-// ksize_cols * depth]` containing image patches with size
-// `ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension. Note
-// `out_rows` and `out_cols` are the dimensions of the output patches.
-func ExtractImagePatches(scope *Scope, images tf.Output, ksizes []int64, strides []int64, rates []int64, padding string) (patches tf.Output) {
+// *NOTE*: `SquaredDifference` supports broadcasting. More about broadcasting
+// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+func SquaredDifference(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"ksizes": ksizes, "strides": strides, "rates": rates, "padding": padding}
opspec := tf.OpSpec{
- Type: "ExtractImagePatches",
+ Type: "SquaredDifference",
Input: []tf.Input{
- images,
+ x, y,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
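// A usage sketch (illustration only):
//
// ```go
// s := op.NewScope()
// z := op.SquaredDifference(s, op.Const(s, float32(3)), op.Const(s, float32(1)))
// _ = z // evaluates to (3-1)*(3-1) = 4
// ```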
-// BatchToSpace for N-D tensors of type T.
+// Forwards the input to the output.
//
-// This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape
-// `block_shape + [batch]`, interleaves these blocks back into the grid defined by
-// the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as
-// the input. The spatial dimensions of this intermediate result are then
-// optionally cropped according to `crops` to produce the output. This is the
-// reverse of SpaceToBatch. See below for a precise description.
+// This operator represents the loop termination condition used by the
+// "pivot" switches of a loop.
//
// Arguments:
-// input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
-// where spatial_shape has M dimensions.
-// block_shape: 1-D with shape `[M]`, all values must be >= 1.
-// crops: 2-D with shape `[M, 2]`, all values must be >= 0.
-// `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input
-// dimension `i + 1`, which corresponds to spatial dimension `i`. It is
-// required that
-// `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.
-//
-// This operation is equivalent to the following steps:
-//
-// 1. Reshape `input` to `reshaped` of shape:
-// [block_shape[0], ..., block_shape[M-1],
-// batch / prod(block_shape),
-// input_shape[1], ..., input_shape[N-1]]
-//
-// 2. Permute dimensions of `reshaped` to produce `permuted` of shape
-// [batch / prod(block_shape),
-//
-// input_shape[1], block_shape[0],
-// ...,
-// input_shape[M], block_shape[M-1],
-//
-// input_shape[M+1], ..., input_shape[N-1]]
-//
-// 3. Reshape `permuted` to produce `reshaped_permuted` of shape
-// [batch / prod(block_shape),
-//
-// input_shape[1] * block_shape[0],
-// ...,
-// input_shape[M] * block_shape[M-1],
-//
-// input_shape[M+1],
-// ...,
-// input_shape[N-1]]
-//
-// 4. Crop the start and end of dimensions `[1, ..., M]` of
-// `reshaped_permuted` according to `crops` to produce the output of shape:
-// [batch / prod(block_shape),
-//
-// input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],
-// ...,
-// input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],
-//
-// input_shape[M+1], ..., input_shape[N-1]]
-//
-// Some examples:
-//
-// (1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and
-// `crops = [[0, 0], [0, 0]]`:
-//
-// ```
-// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
-// ```
-//
-// The output tensor has shape `[1, 2, 2, 1]` and value:
-//
-// ```
-// x = [[[[1], [2]], [[3], [4]]]]
-// ```
-//
-// (2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and
-// `crops = [[0, 0], [0, 0]]`:
-//
-// ```
-// [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
-// ```
-//
-// The output tensor has shape `[1, 2, 2, 3]` and value:
-//
-// ```
-// x = [[[[1, 2, 3], [4, 5, 6]],
-// [[7, 8, 9], [10, 11, 12]]]]
-// ```
-//
-// (3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and
-// `crops = [[0, 0], [0, 0]]`:
-//
-// ```
-// x = [[[[1], [3]], [[9], [11]]],
-// [[[2], [4]], [[10], [12]]],
-// [[[5], [7]], [[13], [15]]],
-// [[[6], [8]], [[14], [16]]]]
-// ```
-//
-// The output tensor has shape `[1, 4, 4, 1]` and value:
-//
-// ```
-// x = [[[1], [2], [3], [4]],
-// [[5], [6], [7], [8]],
-// [[9], [10], [11], [12]],
-// [[13], [14], [15], [16]]]
-// ```
-//
-// (4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and
-// `crops = [[0, 0], [2, 0]]`:
-//
-// ```
-// x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
-// [[[0], [2], [4]]], [[[0], [10], [12]]],
-// [[[0], [5], [7]]], [[[0], [13], [15]]],
-// [[[0], [6], [8]]], [[[0], [14], [16]]]]
-// ```
-//
-// The output tensor has shape `[2, 2, 4, 1]` and value:
+// input: A boolean scalar, representing the branch predicate of the Switch op.
//
-// ```
-// x = [[[[1], [2], [3], [4]],
-// [[5], [6], [7], [8]]],
-// [[[9], [10], [11], [12]],
-// [[13], [14], [15], [16]]]]
-// ```
-func BatchToSpaceND(scope *Scope, input tf.Output, block_shape tf.Output, crops tf.Output) (output tf.Output) {
+// Returns The same tensor as `input`.
+func LoopCond(scope *Scope, input tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "BatchToSpaceND",
+ Type: "LoopCond",
Input: []tf.Input{
- input, block_shape, crops,
+ input,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// SpaceToBatch for 4-D tensors of type T.
-//
-// This is a legacy version of the more general SpaceToBatchND.
-//
-// Zero-pads and then rearranges (permutes) blocks of spatial data into batch.
-// More specifically, this op outputs a copy of the input tensor where values from
-// the `height` and `width` dimensions are moved to the `batch` dimension. After
-// the zero-padding, both `height` and `width` of the input must be divisible by the
-// block size.
+// QuantizedMulAttr is an optional argument to QuantizedMul.
+type QuantizedMulAttr func(optionalAttr)
+
+// QuantizedMulToutput sets the optional Toutput attribute to value.
+// If not specified, defaults to DT_QINT32
+func QuantizedMulToutput(value tf.DataType) QuantizedMulAttr {
+ return func(m optionalAttr) {
+ m["Toutput"] = value
+ }
+}
+
+// Returns x * y element-wise, working on quantized buffers.
//
// Arguments:
-// input: 4-D with shape `[batch, height, width, depth]`.
-// paddings: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
-// the padding of the input with zeros across the spatial dimensions as follows:
//
-// paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]
-//
-// The effective spatial dimensions of the zero-padded input tensor will be:
-//
-// height_pad = pad_top + height + pad_bottom
-// width_pad = pad_left + width + pad_right
-//
-// The attr `block_size` must be greater than one. It indicates the block size.
-//
-// * Non-overlapping blocks of size `block_size x block size` in the height and
-// width dimensions are rearranged into the batch dimension at each location.
-// * The batch of the output tensor is `batch * block_size * block_size`.
-// * Both height_pad and width_pad must be divisible by block_size.
-//
-// The shape of the output will be:
-//
-// [batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
-// depth]
-//
-// Some examples:
-//
-// (1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2:
-//
-// ```
-// x = [[[[1], [2]], [[3], [4]]]]
-// ```
-//
-// The output tensor has shape `[4, 1, 1, 1]` and value:
-//
-// ```
-// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
-// ```
-//
-// (2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2:
-//
-// ```
-// x = [[[[1, 2, 3], [4, 5, 6]],
-// [[7, 8, 9], [10, 11, 12]]]]
-// ```
-//
-// The output tensor has shape `[4, 1, 1, 3]` and value:
-//
-// ```
-// [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
-// ```
-//
-// (3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2:
-//
-// ```
-// x = [[[[1], [2], [3], [4]],
-// [[5], [6], [7], [8]],
-// [[9], [10], [11], [12]],
-// [[13], [14], [15], [16]]]]
-// ```
-//
-// The output tensor has shape `[4, 2, 2, 1]` and value:
-//
-// ```
-// x = [[[[1], [3]], [[9], [11]]],
-// [[[2], [4]], [[10], [12]]],
-// [[[5], [7]], [[13], [15]]],
-// [[[6], [8]], [[14], [16]]]]
-// ```
-//
-// (4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2:
-//
-// ```
-// x = [[[[1], [2], [3], [4]],
-// [[5], [6], [7], [8]]],
-// [[[9], [10], [11], [12]],
-// [[13], [14], [15], [16]]]]
-// ```
//
-// The output tensor has shape `[8, 1, 2, 1]` and value:
-//
-// ```
-// x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
-// [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
-// ```
+// min_x: The float value that the lowest quantized `x` value represents.
+// max_x: The float value that the highest quantized `x` value represents.
+// min_y: The float value that the lowest quantized `y` value represents.
+// max_y: The float value that the highest quantized `y` value represents.
//
-// Among others, this operation is useful for reducing atrous convolution into
-// regular convolution.
+// Returns The float value that the lowest quantized output value represents. The float value that the highest quantized output value represents.
//
-func SpaceToBatch(scope *Scope, input tf.Output, paddings tf.Output, block_size int64) (output tf.Output) {
+// *NOTE*: `QuantizedMul` supports limited forms of broadcasting. More about
+// broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+func QuantizedMul(scope *Scope, x tf.Output, y tf.Output, min_x tf.Output, max_x tf.Output, min_y tf.Output, max_y tf.Output, optional ...QuantizedMulAttr) (z tf.Output, min_z tf.Output, max_z tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"block_size": block_size}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "SpaceToBatch",
+ Type: "QuantizedMul",
Input: []tf.Input{
- input, paddings,
+ x, y, min_x, max_x, min_y, max_y,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1), op.Output(2)
}
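// The returned `min_z`/`max_z` describe how to map the quantized output back
// to floats. A plain-Go sketch of the standard linear `quint8` range mapping
// (an assumption about the quantization scheme, for illustration only):
//
// ```go
// func dequantize(q uint8, min, max float32) float32 {
// 	return min + (max-min)*float32(q)/255.0
// }
// ```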
-// QuantizeAndDequantizeV2Attr is an optional argument to QuantizeAndDequantizeV2.
-type QuantizeAndDequantizeV2Attr func(optionalAttr)
+// QuantizedMatMulAttr is an optional argument to QuantizedMatMul.
+type QuantizedMatMulAttr func(optionalAttr)
-// QuantizeAndDequantizeV2SignedInput sets the optional signed_input attribute to value.
-//
-// value: If the quantization is signed or unsigned.
-// If not specified, defaults to true
-func QuantizeAndDequantizeV2SignedInput(value bool) QuantizeAndDequantizeV2Attr {
+// QuantizedMatMulToutput sets the optional Toutput attribute to value.
+// If not specified, defaults to DT_QINT32
+func QuantizedMatMulToutput(value tf.DataType) QuantizedMatMulAttr {
return func(m optionalAttr) {
- m["signed_input"] = value
+ m["Toutput"] = value
}
}
-// QuantizeAndDequantizeV2NumBits sets the optional num_bits attribute to value.
+// QuantizedMatMulTransposeA sets the optional transpose_a attribute to value.
//
-// value: The bitwidth of the quantization.
-// If not specified, defaults to 8
-func QuantizeAndDequantizeV2NumBits(value int64) QuantizeAndDequantizeV2Attr {
+// value: If true, `a` is transposed before multiplication.
+// If not specified, defaults to false
+func QuantizedMatMulTransposeA(value bool) QuantizedMatMulAttr {
return func(m optionalAttr) {
- m["num_bits"] = value
+ m["transpose_a"] = value
}
}
-// QuantizeAndDequantizeV2RangeGiven sets the optional range_given attribute to value.
+// QuantizedMatMulTransposeB sets the optional transpose_b attribute to value.
//
-// value: If the range is given or should be computed from the tensor.
+// value: If true, `b` is transposed before multiplication.
// If not specified, defaults to false
-func QuantizeAndDequantizeV2RangeGiven(value bool) QuantizeAndDequantizeV2Attr {
+func QuantizedMatMulTransposeB(value bool) QuantizedMatMulAttr {
return func(m optionalAttr) {
- m["range_given"] = value
+ m["transpose_b"] = value
}
}
-// Quantizes then dequantizes a tensor.
-//
-// This op simulates the precision loss from the quantized forward pass by:
-// 1. Quantizing the tensor to fixed point numbers, which should match the target
-// quantization method when it is used in inference.
-// 2. Dequantizing it back to floating point numbers for the following ops, most
-// likely matmul.
-//
-// There are different ways to quantize. This version does not use the full range
-// of the output type, choosing to elide the lowest possible value for symmetry
-// (e.g., output range is -127 to 127, not -128 to 127 for signed 8 bit
-// quantization), so that 0.0 maps to 0.
-//
-// To perform this op, we first find the range of values in our tensor. The range
-// we use is always centered on 0, so we find m such that
-//
-// 1. m = max(abs(input_min), abs(input_max)) if range_given is true,
-// 2. m = max(abs(min_elem(input)), abs(max_elem(input))) otherwise.
-//
-// Our input tensor range is then [-m, m].
-//
-// Next, we choose our fixed-point quantization buckets, [min_fixed, max_fixed].
-// If signed_input is true, this is
-//
-// [min_fixed, max_fixed ] =
-// [-(1 << (num_bits - 1) - 1), (1 << (num_bits - 1)) - 1].
-//
-// Otherwise, if signed_input is false, the fixed-point range is
-//
-// [min_fixed, max_fixed] = [0, (1 << num_bits) - 1].
-//
-// From this we compute our scaling factor, s:
-//
-// s = (max_fixed - min_fixed) / (2 * m).
-//
-// Now we can quantize and dequantize the elements of our tensor. An element e
-// is transformed into e':
-//
-// e' = (e * s).round_to_nearest() / s.
-//
-// Note that we have a different number of buckets in the signed vs. unsigned
-// cases. For example, if num_bits == 8, we get 254 buckets in the signed case
-// vs. 255 in the unsigned case.
-//
-// For example, suppose num_bits = 8 and m = 1. Then
+// QuantizedMatMulTactivation sets the optional Tactivation attribute to value.
//
-// [min_fixed, max_fixed] = [-127, 127], and
-// s = (127 + 127) / 2 = 127.
+// value: The type of output produced by the activation function
+// following this operation.
+// If not specified, defaults to DT_QUINT8
+func QuantizedMatMulTactivation(value tf.DataType) QuantizedMatMulAttr {
+ return func(m optionalAttr) {
+ m["Tactivation"] = value
+ }
+}
+
+// Perform a quantized matrix multiplication of `a` by the matrix `b`.
//
-// Given the vector {-1, -0.5, 0, 0.3}, this is quantized to
-// {-127, -63, 0, 38}, and dequantized to {-1, -63.0/127, 0, 38.0/127}.
+// The inputs must be two-dimensional matrices and the inner dimension of
+// `a` (after being transposed if `transpose_a` is non-zero) must match the
+// outer dimension of `b` (after being transposed if `transpose_b` is
+// non-zero).
//
// Arguments:
-// input: Tensor to quantize and then dequantize.
-// input_min: If range_given, this is the min of the range, otherwise this input
-// will be ignored.
-// input_max: If range_given, this is the max of the range, otherwise this input
-// will be ignored.
-func QuantizeAndDequantizeV2(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, optional ...QuantizeAndDequantizeV2Attr) (output tf.Output) {
+// a: Must be a two-dimensional tensor.
+// b: Must be a two-dimensional tensor.
+// min_a: The float value that the lowest quantized `a` value represents.
+// max_a: The float value that the highest quantized `a` value represents.
+// min_b: The float value that the lowest quantized `b` value represents.
+// max_b: The float value that the highest quantized `b` value represents.
+//
+// Returns The float value that the lowest quantized output value represents. The float value that the highest quantized output value represents.
+func QuantizedMatMul(scope *Scope, a tf.Output, b tf.Output, min_a tf.Output, max_a tf.Output, min_b tf.Output, max_b tf.Output, optional ...QuantizedMatMulAttr) (out tf.Output, min_out tf.Output, max_out tf.Output) {
if scope.Err() != nil {
return
}
@@ -934,197 +677,114 @@ func QuantizeAndDequantizeV2(scope *Scope, input tf.Output, input_min tf.Output,
a(attrs)
}
opspec := tf.OpSpec{
- Type: "QuantizeAndDequantizeV2",
+ Type: "QuantizedMatMul",
Input: []tf.Input{
- input, input_min, input_max,
+ a, b, min_a, max_a, min_b, max_b,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1), op.Output(2)
}
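// A graph-construction sketch that quantizes float inputs in-graph before the
// matmul. It assumes the generated op.QuantizeV2 wrapper from elsewhere in
// this file; values are illustrative:
//
// ```go
// s := op.NewScope()
// aF := op.Const(s, [][]float32{{0.1, 0.2}, {0.3, 0.4}})
// bF := op.Const(s, [][]float32{{0.5}, {0.6}})
// lo, hi := op.Const(s, float32(0)), op.Const(s, float32(1))
// a, minA, maxA := op.QuantizeV2(s, aF, lo, hi, tf.Quint8)
// b, minB, maxB := op.QuantizeV2(s, bF, lo, hi, tf.Quint8)
// out, minOut, maxOut := op.QuantizedMatMul(s, a, b, minA, maxA, minB, maxB)
// _, _, _ = out, minOut, maxOut
// ```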
-// SpaceToBatch for N-D tensors of type T.
-//
-// This operation divides "spatial" dimensions `[1, ..., M]` of the input into a
-// grid of blocks of shape `block_shape`, and interleaves these blocks with the
-// "batch" dimension (0) such that in the output, the spatial dimensions
-// `[1, ..., M]` correspond to the position within the grid, and the batch
-// dimension combines both the position within a spatial block and the original
-// batch position. Prior to division into blocks, the spatial dimensions of the
-// input are optionally zero padded according to `paddings`. See below for a
-// precise description.
+// A placeholder op that passes through `input` when its output is not fed.
//
// Arguments:
-// input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
-// where spatial_shape has `M` dimensions.
-// block_shape: 1-D with shape `[M]`, all values must be >= 1.
-// paddings: 2-D with shape `[M, 2]`, all values must be >= 0.
-// `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension
-// `i + 1`, which corresponds to spatial dimension `i`. It is required that
-// `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.
-//
-// This operation is equivalent to the following steps:
-//
-// 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the
-// input according to `paddings` to produce `padded` of shape `padded_shape`.
-//
-// 2. Reshape `padded` to `reshaped_padded` of shape:
-//
-// [batch] +
-// [padded_shape[1] / block_shape[0],
-// block_shape[0],
-// ...,
-// padded_shape[M] / block_shape[M-1],
-// block_shape[M-1]] +
-// remaining_shape
-//
-// 3. Permute dimensions of `reshaped_padded` to produce
-// `permuted_reshaped_padded` of shape:
-//
-// block_shape +
-// [batch] +
-// [padded_shape[1] / block_shape[0],
-// ...,
-// padded_shape[M] / block_shape[M-1]] +
-// remaining_shape
-//
-// 4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch
-// dimension, producing an output tensor of shape:
-//
-// [batch * prod(block_shape)] +
-// [padded_shape[1] / block_shape[0],
-// ...,
-// padded_shape[M] / block_shape[M-1]] +
-// remaining_shape
-//
-// Some examples:
-//
-// (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and
-// `paddings = [[0, 0], [0, 0]]`:
-//
-// ```
-// x = [[[[1], [2]], [[3], [4]]]]
-// ```
-//
-// The output tensor has shape `[4, 1, 1, 1]` and value:
-//
-// ```
-// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
-// ```
-//
-// (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and
-// `paddings = [[0, 0], [0, 0]]`:
-//
-// ```
-// x = [[[[1, 2, 3], [4, 5, 6]],
-// [[7, 8, 9], [10, 11, 12]]]]
-// ```
-//
-// The output tensor has shape `[4, 1, 1, 3]` and value:
-//
-// ```
-// [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
-// ```
-//
-// (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and
-// `paddings = [[0, 0], [0, 0]]`:
-//
-// ```
-// x = [[[[1], [2], [3], [4]],
-// [[5], [6], [7], [8]],
-// [[9], [10], [11], [12]],
-// [[13], [14], [15], [16]]]]
-// ```
-//
-// The output tensor has shape `[4, 2, 2, 1]` and value:
+// input: The default value to produce when `output` is not fed.
+// shape: The (possibly partial) shape of the tensor.
//
-// ```
-// x = [[[[1], [3]], [[9], [11]]],
-// [[[2], [4]], [[10], [12]]],
-// [[[5], [7]], [[13], [15]]],
-// [[[6], [8]], [[14], [16]]]]
-// ```
+// Returns A placeholder tensor that defaults to `input` if it is not fed.
+func PlaceholderWithDefault(scope *Scope, input tf.Output, shape tf.Shape) (output tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{"shape": shape}
+ opspec := tf.OpSpec{
+ Type: "PlaceholderWithDefault",
+ Input: []tf.Input{
+ input,
+ },
+ Attrs: attrs,
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
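// A usage sketch (illustration only):
//
// ```go
// s := op.NewScope()
// def := op.Const(s, []int32{1, 2, 3})
// out := op.PlaceholderWithDefault(s, def, tf.MakeShape(3))
// // Feeding `out` in Session.Run overrides the default; otherwise the
// // constant [1 2 3] flows through unchanged.
// _ = out
// ```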
+
+// Returns the complex conjugate of a complex number.
//
-// (4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and
-// paddings = `[[0, 0], [2, 0]]`:
+// Given a tensor `input` of complex numbers, this operation returns a tensor of
+// complex numbers that are the complex conjugate of each element in `input`. The
+// complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the
+// real part and *b* is the imaginary part.
//
-// ```
-// x = [[[[1], [2], [3], [4]],
-// [[5], [6], [7], [8]]],
-// [[[9], [10], [11], [12]],
-// [[13], [14], [15], [16]]]]
-// ```
+// The complex conjugate returned by this operation is of the form \\(a - bj\\).
//
-// The output tensor has shape `[8, 1, 3, 1]` and value:
+// For example:
//
// ```
-// x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
-// [[[0], [2], [4]]], [[[0], [10], [12]]],
-// [[[0], [5], [7]]], [[[0], [13], [15]]],
-// [[[0], [6], [8]]], [[[0], [14], [16]]]]
+// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
+// tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
// ```
-//
-// Among others, this operation is useful for reducing atrous convolution into
-// regular convolution.
-func SpaceToBatchND(scope *Scope, input tf.Output, block_shape tf.Output, paddings tf.Output) (output tf.Output) {
+func Conj(scope *Scope, input tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "SpaceToBatchND",
+ Type: "Conj",
Input: []tf.Input{
- input, block_shape, paddings,
+ input,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
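// A usage sketch mirroring the example in the doc comment above
// (illustration only):
//
// ```go
// s := op.NewScope()
// c := op.Conj(s, op.Const(s, []complex64{complex(-2.25, 4.75)}))
// _ = c // evaluates to [-2.25 - 4.75i]
// ```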
-// SqueezeAttr is an optional argument to Squeeze.
-type SqueezeAttr func(optionalAttr)
+// ResourceSparseApplyMomentumAttr is an optional argument to ResourceSparseApplyMomentum.
+type ResourceSparseApplyMomentumAttr func(optionalAttr)
-// SqueezeAxis sets the optional axis attribute to value.
-//
-// value: If specified, only squeezes the dimensions listed. The dimension
-// index starts at 0. It is an error to squeeze a dimension that is not 1. Must
-// be in the range `[-rank(input), rank(input))`.
-// If not specified, defaults to <>
+// ResourceSparseApplyMomentumUseLocking sets the optional use_locking attribute to value.
//
-// REQUIRES: len(value) >= 0
-func SqueezeAxis(value []int64) SqueezeAttr {
+// value: If `True`, updating of the var and accum tensors will be protected
+// by a lock; otherwise the behavior is undefined, but may exhibit less
+// contention.
+// If not specified, defaults to false
+func ResourceSparseApplyMomentumUseLocking(value bool) ResourceSparseApplyMomentumAttr {
return func(m optionalAttr) {
- m["squeeze_dims"] = value
+ m["use_locking"] = value
}
}
-// Removes dimensions of size 1 from the shape of a tensor.
-//
-// Given a tensor `input`, this operation returns a tensor of the same type with
-// all dimensions of size 1 removed. If you don't want to remove all size 1
-// dimensions, you can remove specific size 1 dimensions by specifying
-// `axis`.
+// ResourceSparseApplyMomentumUseNesterov sets the optional use_nesterov attribute to value.
//
-// For example:
+// value: If `True`, the tensor passed to compute grad will be
+// var - lr * momentum * accum, so in the end, the var you get is actually
+// var - lr * momentum * accum.
+// If not specified, defaults to false
+func ResourceSparseApplyMomentumUseNesterov(value bool) ResourceSparseApplyMomentumAttr {
+ return func(m optionalAttr) {
+ m["use_nesterov"] = value
+ }
+}
+
+// Update relevant entries in '*var' and '*accum' according to the momentum scheme.
//
-// ```
-// # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
-// shape(squeeze(t)) ==> [2, 3]
-// ```
+// Set use_nesterov = True if you want to use Nesterov momentum.
//
-// Or, to remove specific size 1 dimensions:
+// That is for rows we have grad for, we update var and accum as follows:
//
-// ```
-// # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
-// shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
-// ```
+// accum = accum * momentum + grad
+// var -= lr * accum
//
// Arguments:
-// input: The `input` to squeeze.
+// var_: Should be from a Variable().
+// accum: Should be from a Variable().
+// lr: Learning rate. Must be a scalar.
+// grad: The gradient.
+// indices: A vector of indices into the first dimension of var and accum.
+// momentum: Momentum. Must be a scalar.
//
-// Returns Contains the same data as `input`, but has one or more dimensions of
-// size 1 removed.
-func Squeeze(scope *Scope, input tf.Output, optional ...SqueezeAttr) (output tf.Output) {
+// Returns the created operation.
+func ResourceSparseApplyMomentum(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, indices tf.Output, momentum tf.Output, optional ...ResourceSparseApplyMomentumAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
@@ -1133,284 +793,223 @@ func Squeeze(scope *Scope, input tf.Output, optional ...SqueezeAttr) (output tf.
a(attrs)
}
opspec := tf.OpSpec{
- Type: "Squeeze",
+ Type: "ResourceSparseApplyMomentum",
Input: []tf.Input{
- input,
+ var_, accum, lr, grad, indices, momentum,
},
Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
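// A scalar walk-through of the momentum update above for one selected row,
// in plain Go (illustration only; Nesterov momentum is omitted):
//
// ```go
// func momentumStep(v, accum, lr, grad, momentum float64) (float64, float64) {
// 	accum = accum*momentum + grad
// 	v -= lr * accum
// 	return v, accum // new var entry, new accum entry
// }
// ```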
-// A placeholder op for a value that will be fed into the computation.
+// Creates a sequence of numbers.
//
-// DEPRECATED at GraphDef version 23: Placeholder now behaves the same as PlaceholderV2.
+// This operation creates a sequence of numbers that begins at `start` and
+// extends by increments of `delta` up to but not including `limit`.
//
-// N.B. This operation will fail with an error if it is executed. It is
-// intended as a way to represent a value that will always be fed, and to
-// provide attrs that enable the fed value to be checked at runtime.
+// For example:
+//
+// ```
+// # 'start' is 3
+// # 'limit' is 18
+// # 'delta' is 3
+// tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
+// ```
//
// Arguments:
-// dtype: The type of elements in the tensor.
-// shape: The shape of the tensor. The shape can be any partially-specified
-// shape. To be unconstrained, pass in a shape with unknown rank.
+// start: 0-D (scalar). First entry in the sequence.
+// limit: 0-D (scalar). Upper limit of sequence, exclusive.
+// delta: 0-D (scalar). Optional. Default is 1. Number that increments `start`.
//
-// Returns A placeholder tensor that must be replaced using the feed mechanism.
-func PlaceholderV2(scope *Scope, dtype tf.DataType, shape tf.Shape) (output tf.Output) {
+// Returns 1-D.
+func Range(scope *Scope, start tf.Output, limit tf.Output, delta tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dtype": dtype, "shape": shape}
opspec := tf.OpSpec{
- Type: "PlaceholderV2",
-
- Attrs: attrs,
+ Type: "Range",
+ Input: []tf.Input{
+ start, limit, delta,
+ },
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
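A complete usage sketch for the wrapper above (assuming the TensorFlow C library is installed and the standard Go bindings are importable; error handling shortened to panics):

```go
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	seq := op.Range(s, op.Const(s, int32(3)), op.Const(s, int32(18)), op.Const(s, int32(3)))
	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()
	out, err := sess.Run(nil, []tf.Output{seq}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].Value()) // [3 6 9 12 15]
}
```

Later sketches in this file reuse the same scope/session plumbing and show graph construction only.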
-// Pads a tensor with mirrored values.
-//
-// This operation pads a `input` with mirrored values according to the `paddings`
-// you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is
-// the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
-// how many values to add before the contents of `input` in that dimension, and
-// `paddings[D, 1]` indicates how many values to add after the contents of `input`
-// in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater
-// than `input.dim_size(D)` (or `input.dim_size(D) - 1`) if `copy_border` is true
-// (if false, respectively).
-//
-// The padded size of each dimension D of the output is:
-//
-// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
-//
-// For example:
+// Computes gradients for SparseSegmentSqrtN.
//
-// ```
-// # 't' is [[1, 2, 3], [4, 5, 6]].
-// # 'paddings' is [[1, 1]], [2, 2]].
-// # 'mode' is SYMMETRIC.
-// # rank of 't' is 2.
-// pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]
-// [2, 1, 1, 2, 3, 3, 2]
-// [5, 4, 4, 5, 6, 6, 5]
-// [5, 4, 4, 5, 6, 6, 5]]
-// ```
+// Returns tensor "output" with same shape as grad, except for dimension 0 whose
+// value is output_dim0.
//
// Arguments:
-// input: The input tensor to be padded.
-// paddings: A two-column matrix specifying the padding sizes. The number of
-// rows must be the same as the rank of `input`.
-// mode: Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions
-// do not include the borders, while in symmetric mode the padded regions
-// do include the borders. For example, if `input` is `[1, 2, 3]` and `paddings`
-// is `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and
-// it is `[1, 2, 3, 3, 2]` in symmetric mode.
-//
-// Returns The padded tensor.
-func MirrorPad(scope *Scope, input tf.Output, paddings tf.Output, mode string) (output tf.Output) {
+// grad: gradient propagated to the SparseSegmentSqrtN op.
+// indices: indices passed to the corresponding SparseSegmentSqrtN op.
+// segment_ids: segment_ids passed to the corresponding SparseSegmentSqrtN op.
+// output_dim0: dimension 0 of "data" passed to SparseSegmentSqrtN op.
+func SparseSegmentSqrtNGrad(scope *Scope, grad tf.Output, indices tf.Output, segment_ids tf.Output, output_dim0 tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"mode": mode}
opspec := tf.OpSpec{
- Type: "MirrorPad",
+ Type: "SparseSegmentSqrtNGrad",
Input: []tf.Input{
- input, paddings,
+ grad, indices, segment_ids, output_dim0,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Return the reduction indices for computing gradients of s0 op s1 with broadcast.
+// Computes the mean along sparse segments of a tensor.
//
-// This is typically used by gradient computations for a broadcasting operation.
-func BroadcastGradientArgs(scope *Scope, s0 tf.Output, s1 tf.Output) (r0 tf.Output, r1 tf.Output) {
+// Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
+// segments.
+//
+// Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first
+// dimension, selecting a subset of dimension 0, specified by `indices`.
+//
+// Arguments:
+//
+// indices: A 1-D tensor. Has same rank as `segment_ids`.
+// segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
+//
+// Returns Has same shape as data, except for dimension 0 which
+// has size `k`, the number of segments.
+func SparseSegmentMean(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "BroadcastGradientArgs",
+ Type: "SparseSegmentMean",
Input: []tf.Input{
- s0, s1,
+ data, indices, segment_ids,
},
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1)
+ return op.Output(0)
}
-// Return the shape of s0 op s1 with broadcast.
+// Pop the element at the top of the stack.
//
-// Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the
-// broadcasted shape. `s0`, `s1` and `r0` are all integer vectors.
-func BroadcastArgs(scope *Scope, s0 tf.Output, s1 tf.Output) (r0 tf.Output) {
+// Arguments:
+// handle: The handle to a stack.
+// elem_type: The type of the elem that is popped.
+//
+// Returns The tensor that is popped from the top of the stack.
+func StackPopV2(scope *Scope, handle tf.Output, elem_type tf.DataType) (elem tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"elem_type": elem_type}
opspec := tf.OpSpec{
- Type: "BroadcastArgs",
+ Type: "StackPopV2",
Input: []tf.Input{
- s0, s1,
+ handle,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Returns locations of nonzero / true values in a tensor.
+// Computes the sum along sparse segments of a tensor.
//
-// This operation returns the coordinates of true elements in `condition`. The
-// coordinates are returned in a 2-D tensor where the first dimension (rows)
-// represents the number of true elements, and the second dimension (columns)
-// represents the coordinates of the true elements. Keep in mind, the shape of
-// the output tensor can vary depending on how many true values there are in
-// `condition`. Indices are output in row-major order.
+// Like `SparseSegmentSum`, but allows missing ids in `segment_ids`. If an id is
+// missing, the `output` tensor at that position will be zeroed.
//
-// For example:
+// Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
+// segments.
//
-// ```
-// # 'input' tensor is [[True, False]
-// # [True, False]]
-// # 'input' has two true values, so output has two coordinates.
-// # 'input' has rank of 2, so coordinates have two indices.
-// where(input) ==> [[0, 0],
-// [1, 0]]
+// For example:
//
-// # `condition` tensor is [[[True, False]
-// # [True, False]]
-// # [[False, True]
-// # [False, True]]
-// # [[False, False]
-// # [False, True]]]
-// # 'input' has 5 true values, so output has 5 coordinates.
-// # 'input' has rank of 3, so coordinates have three indices.
-// where(input) ==> [[0, 0, 0],
-// [0, 1, 0],
-// [1, 0, 1],
-// [1, 1, 1],
-// [2, 1, 1]]
+// ```python
+// c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
//
-// # `condition` tensor is [[[1.5, 0.0]
-// # [-0.5, 0.0]]
-// # [[0.0, 0.25]
-// # [0.0, 0.75]]
-// # [[0.0, 0.0]
-// # [0.0, 0.01]]]
-// # 'input' has 5 nonzero values, so output has 5 coordinates.
-// # 'input' has rank of 3, so coordinates have three indices.
-// where(input) ==> [[0, 0, 0],
-// [0, 1, 0],
-// [1, 0, 1],
-// [1, 1, 1],
-// [2, 1, 1]]
+// tf.sparse_segment_sum_with_num_segments(
+// c, tf.constant([0, 1]), tf.constant([0, 0]), num_segments=3)
+// # => [[0 0 0 0]
+// # [0 0 0 0]
+// # [0 0 0 0]]
//
-// # `condition` tensor is [[[1.5 + 0.0j, 0.0 + 0.0j]
-// # [0.0 + 0.5j, 0.0 + 0.0j]]
-// # [[0.0 + 0.0j, 0.25 + 1.5j]
-// # [0.0 + 0.0j, 0.75 + 0.0j]]
-// # [[0.0 + 0.0j, 0.0 + 0.0j]
-// # [0.0 + 0.0j, 0.01 + 0.0j]]]
-// # 'input' has 5 nonzero magnitude values, so output has 5 coordinates.
-// # 'input' has rank of 3, so coordinates have three indices.
-// where(input) ==> [[0, 0, 0],
-// [0, 1, 0],
-// [1, 0, 1],
-// [1, 1, 1],
-// [2, 1, 1]]
+// tf.sparse_segment_sum_with_num_segments(c,
+//                                         tf.constant([0, 1]),
+//                                         tf.constant([0, 2]),
+//                                         num_segments=4)
+// # => [[ 1 2 3 4]
+// # [ 0 0 0 0]
+// # [-1 -2 -3 -4]
+// # [ 0 0 0 0]]
// ```
-func Where(scope *Scope, condition tf.Output) (index tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "Where",
- Input: []tf.Input{
- condition,
- },
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// Returns the gradient of `Tile`.
//
-// DEPRECATED at GraphDef version 3: TileGrad has been replaced with reduce_sum
+// Arguments:
//
-// Since `Tile` takes an input and repeats the input `multiples` times
-// along each dimension, `TileGrad` takes in `multiples` and aggregates
-// each repeated tile of `input` into `output`.
-func TileGrad(scope *Scope, input tf.Output, multiples tf.Output) (output tf.Output) {
+// indices: A 1-D tensor. Has same rank as `segment_ids`.
+// segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
+// num_segments: Should equal the number of distinct segment IDs.
+//
+// Returns Has same shape as data, except for dimension 0 which
+// has size `num_segments`.
+func SparseSegmentSumWithNumSegments(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "TileGrad",
+ Type: "SparseSegmentSumWithNumSegments",
Input: []tf.Input{
- input, multiples,
+ data, indices, segment_ids, num_segments,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// StridedSliceGradAttr is an optional argument to StridedSliceGrad.
-type StridedSliceGradAttr func(optionalAttr)
-
-// StridedSliceGradBeginMask sets the optional begin_mask attribute to value.
-// If not specified, defaults to 0
-func StridedSliceGradBeginMask(value int64) StridedSliceGradAttr {
- return func(m optionalAttr) {
- m["begin_mask"] = value
- }
-}
-
-// StridedSliceGradEndMask sets the optional end_mask attribute to value.
-// If not specified, defaults to 0
-func StridedSliceGradEndMask(value int64) StridedSliceGradAttr {
- return func(m optionalAttr) {
- m["end_mask"] = value
- }
-}
-
-// StridedSliceGradEllipsisMask sets the optional ellipsis_mask attribute to value.
-// If not specified, defaults to 0
-func StridedSliceGradEllipsisMask(value int64) StridedSliceGradAttr {
- return func(m optionalAttr) {
- m["ellipsis_mask"] = value
- }
-}
-
-// StridedSliceGradNewAxisMask sets the optional new_axis_mask attribute to value.
-// If not specified, defaults to 0
-func StridedSliceGradNewAxisMask(value int64) StridedSliceGradAttr {
- return func(m optionalAttr) {
- m["new_axis_mask"] = value
- }
-}
+// SparseToDenseAttr is an optional argument to SparseToDense.
+type SparseToDenseAttr func(optionalAttr)
-// StridedSliceGradShrinkAxisMask sets the optional shrink_axis_mask attribute to value.
-// If not specified, defaults to 0
-func StridedSliceGradShrinkAxisMask(value int64) StridedSliceGradAttr {
+// SparseToDenseValidateIndices sets the optional validate_indices attribute to value.
+//
+// value: If true, indices are checked to make sure they are sorted in
+// lexicographic order and that there are no repeats.
+// If not specified, defaults to true
+func SparseToDenseValidateIndices(value bool) SparseToDenseAttr {
return func(m optionalAttr) {
- m["shrink_axis_mask"] = value
+ m["validate_indices"] = value
}
}
-// Returns the gradient of `StridedSlice`.
+// Converts a sparse representation into a dense tensor.
//
-// Since `StridedSlice` cuts out pieces of its `input` which is size
-// `shape`, its gradient will have the same shape (which is passed here
-// as `shape`). The gradient will be zero in any element that the slice
-// does not select.
+// Builds an array `dense` with shape `output_shape` such that
//
-// Arguments are the same as StridedSliceGrad with the exception that
-// `dy` is the input gradient to be propagated and `shape` is the
-// shape of `StridedSlice`'s `input`.
-func StridedSliceGrad(scope *Scope, shape tf.Output, begin tf.Output, end tf.Output, strides tf.Output, dy tf.Output, optional ...StridedSliceGradAttr) (output tf.Output) {
+// ```
+// # If sparse_indices is scalar
+// dense[i] = (i == sparse_indices ? sparse_values : default_value)
+//
+// # If sparse_indices is a vector, then for each i
+// dense[sparse_indices[i]] = sparse_values[i]
+//
+// # If sparse_indices is an n by d matrix, then for each i in [0, n)
+// dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
+// ```
+//
+// All other values in `dense` are set to `default_value`. If `sparse_values` is a
+// scalar, all sparse indices are set to this single value.
+//
+// Indices should be sorted in lexicographic order, and indices must not
+// contain any repeats. If `validate_indices` is true, these properties
+// are checked during execution.
+//
+// Arguments:
+// sparse_indices: 0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete
+// index where `sparse_values[i]` will be placed.
+// output_shape: 1-D. Shape of the dense output tensor.
+// sparse_values: 1-D. Values corresponding to each row of `sparse_indices`,
+// or a scalar value to be used for all sparse indices.
+// default_value: Scalar value to set for indices not specified in
+// `sparse_indices`.
+//
+// Returns Dense output tensor of shape `output_shape`.
+func SparseToDense(scope *Scope, sparse_indices tf.Output, output_shape tf.Output, sparse_values tf.Output, default_value tf.Output, optional ...SparseToDenseAttr) (dense tf.Output) {
if scope.Err() != nil {
return
}
@@ -1419,9 +1018,9 @@ func StridedSliceGrad(scope *Scope, shape tf.Output, begin tf.Output, end tf.Out
a(attrs)
}
opspec := tf.OpSpec{
- Type: "StridedSliceGrad",
+ Type: "SparseToDense",
Input: []tf.Input{
- shape, begin, end, strides, dy,
+ sparse_indices, output_shape, sparse_values, default_value,
},
Attrs: attrs,
}
@@ -1429,164 +1028,201 @@ func StridedSliceGrad(scope *Scope, shape tf.Output, begin tf.Output, end tf.Out
return op.Output(0)
}
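Mirroring the pseudo-code above in Go (scope `s` and session plumbing as in the Range sketch earlier):

```go
// Scatter 5 at [0,0] and 6 at [1,2] into a 2x3 dense tensor, default 0.
indices := op.Const(s, [][]int64{{0, 0}, {1, 2}})
shape := op.Const(s, []int64{2, 3})
values := op.Const(s, []int32{5, 6})
defaultValue := op.Const(s, int32(0))
dense := op.SparseToDense(s, indices, shape, values, defaultValue,
	op.SparseToDenseValidateIndices(true))
// dense evaluates to [[5 0 0]
//                     [0 0 6]]
```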
-// Return a slice from 'input'.
+// Counts the number of occurrences of each value in an integer array.
//
-// The output tensor is a tensor with dimensions described by 'size'
-// whose values are extracted from 'input' starting at the offsets in
-// 'begin'.
+// Outputs a vector with length `size` and the same dtype as `weights`. If
+// `weights` are empty, then index `i` stores the number of times the value `i` is
+// counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of
+// the value in `weights` at each index where the corresponding value in `arr` is
+// `i`.
//
-// *Requirements*:
-// 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n)
+// Values in `arr` outside of the range [0, size) are ignored.
//
// Arguments:
+// arr: int32 `Tensor`.
+// size: non-negative int32 scalar `Tensor`.
+// weights: an int32, int64, float32, or float64 `Tensor` with the same
+// shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights
+// equal to 1.
//
-// begin: begin[i] specifies the offset into the 'i'th dimension of
-// 'input' to slice from.
-// size: size[i] specifies the number of elements of the 'i'th dimension
-// of 'input' to slice. If size[i] is -1, all remaining elements in dimension
-// i are included in the slice (i.e. this is equivalent to setting
-// size[i] = input.dim_size(i) - begin[i]).
-func Slice(scope *Scope, input tf.Output, begin tf.Output, size tf.Output) (output tf.Output) {
+// Returns 1D `Tensor` with length equal to `size`. The counts or summed weights for
+// each value in the range [0, size).
+func Bincount(scope *Scope, arr tf.Output, size tf.Output, weights tf.Output) (bins tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Slice",
+ Type: "Bincount",
Input: []tf.Input{
- input, begin, size,
+ arr, size, weights,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
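A sketch of both weighting modes (scope `s` assumed; a length-0 `weights` tensor selects plain counting):

```go
arr := op.Const(s, []int32{1, 1, 2, 3})
size := op.Const(s, int32(4))

// Plain counts: empty weights mean every occurrence counts as 1.
counts := op.Bincount(s, arr, size, op.Const(s, []float32{}))
// => [0 2 1 1]

// Weighted: bin i sums weights[j] over positions j where arr[j] == i.
weighted := op.Bincount(s, arr, size, op.Const(s, []float32{0.5, 0.5, 2, 3}))
// => [0 1 2 3]
```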
-// UniqueV2Attr is an optional argument to UniqueV2.
-type UniqueV2Attr func(optionalAttr)
-
-// UniqueV2OutIdx sets the optional out_idx attribute to value.
-// If not specified, defaults to DT_INT32
-func UniqueV2OutIdx(value tf.DataType) UniqueV2Attr {
- return func(m optionalAttr) {
- m["out_idx"] = value
- }
-}
-
-// Finds unique elements in a 1-D tensor.
+// Computes the sum along sparse segments of a tensor.
//
-// This operation returns a tensor `y` containing all of the unique elements of `x`
-// sorted in the same order that they occur in `x`. This operation also returns a
-// tensor `idx` the same size as `x` that contains the index of each value of `x`
-// in the unique output `y`. In other words:
+// Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
+// segments.
//
-// `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
+// Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first
+// dimension, selecting a subset of dimension 0, specified by `indices`.
//
// For example:
//
-// ```
-// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
-// y, idx = unique(x)
-// y ==> [1, 2, 4, 7, 8]
-// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
+// ```python
+// c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
+//
+// # Select two rows, one segment.
+// tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
+// # => [[0 0 0 0]]
+//
+// # Select two rows, two segments.
+// tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
+// # => [[ 1 2 3 4]
+// # [-1 -2 -3 -4]]
+//
+// # Select all rows, two segments.
+// tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
+// # => [[0 0 0 0]
+// # [5 6 7 8]]
+//
+// # Which is equivalent to:
+// tf.segment_sum(c, tf.constant([0, 0, 1]))
// ```
//
// Arguments:
-// x: A `Tensor`.
-// axis: A `Tensor` of type `int64` (default: 0). The axis of the Tensor to
-// find the unique elements.
//
-// Returns A `Tensor`. Unique elements along the `axis` of `Tensor` x.A 1-D Tensor. Has the same type as x that contains the index of each
-// value of x in the output y.
-func UniqueV2(scope *Scope, x tf.Output, axis tf.Output, optional ...UniqueV2Attr) (y tf.Output, idx tf.Output) {
+// indices: A 1-D tensor. Has same rank as `segment_ids`.
+// segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
+//
+// Returns Has same shape as data, except for dimension 0 which
+// has size `k`, the number of segments.
+func SparseSegmentSum(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "UniqueV2",
+ Type: "SparseSegmentSum",
Input: []tf.Input{
- x, axis,
+ data, indices, segment_ids,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1)
+ return op.Output(0)
}
-// Shuffle dimensions of x according to a permutation and conjugate the result.
-//
-// The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
-// `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
-// `y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])`
-func ConjugateTranspose(scope *Scope, x tf.Output, perm tf.Output) (y tf.Output) {
+// Computes hyperbolic sine of x element-wise.
+func Sinh(scope *Scope, x tf.Output) (y tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "ConjugateTranspose",
+ Type: "Sinh",
Input: []tf.Input{
- x, perm,
+ x,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Checks a tensor for NaN and Inf values.
+// Computes the sum along segments of a tensor.
//
-// When run, reports an `InvalidArgument` error if `tensor` has any values
-// that are not a number (NaN) or infinity (Inf). Otherwise, passes `tensor` as-is.
+// Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
+// segments.
+//
+// Computes a tensor such that
+// `output[i] = sum_{j...} data[j...]` where the sum is over tuples `j...` such
+// that `segment_ids[j...] == i`. Unlike `SegmentSum`, `segment_ids`
+// need not be sorted and need not cover all values in the full
+// range of valid values.
+//
+// If the sum is empty for a given segment ID `i`, `output[i] = 0`.
+// If the given segment ID `i` is negative, the value is dropped and will not be
+// added to the sum of the segment.
+//
+// `num_segments` should equal the number of distinct segment IDs.
+//
+// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+// <img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentSum.png" alt>
+// </div>
//
// Arguments:
//
-// message: Prefix of the error message.
-func CheckNumerics(scope *Scope, tensor tf.Output, message string) (output tf.Output) {
+// segment_ids: A tensor whose shape is a prefix of `data.shape`.
+//
+//
+// Returns Has same shape as data, except for the first `segment_ids.rank`
+// dimensions, which are replaced with a single dimension which has size
+// `num_segments`.
+func UnsortedSegmentSum(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"message": message}
opspec := tf.OpSpec{
- Type: "CheckNumerics",
+ Type: "UnsortedSegmentSum",
Input: []tf.Input{
- tensor,
+ data, segment_ids, num_segments,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
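A sketch with deliberately unsorted `segment_ids` (scope `s` assumed):

```go
data := op.Const(s, []int32{5, 1, 7, 2, 3, 4})
segmentIds := op.Const(s, []int32{0, 2, 0, 1, 2, 1}) // need not be sorted
numSegments := op.Const(s, int32(3))
sums := op.UnsortedSegmentSum(s, data, segmentIds, numSegments)
// => [12 6 4], i.e. 5+7, 2+4, 1+3
```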
-// PreventGradientAttr is an optional argument to PreventGradient.
-type PreventGradientAttr func(optionalAttr)
+// Returns which elements of x are finite.
+//
+// @compatibility(numpy)
+// Equivalent to np.isfinite
+// @end_compatibility
+func IsFinite(scope *Scope, x tf.Output) (y tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "IsFinite",
+ Input: []tf.Input{
+ x,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
-// PreventGradientMessage sets the optional message attribute to value.
+// MatMulAttr is an optional argument to MatMul.
+type MatMulAttr func(optionalAttr)
+
+// MatMulTransposeA sets the optional transpose_a attribute to value.
//
-// value: Will be printed in the error when anyone tries to differentiate
-// this operation.
-// If not specified, defaults to ""
-func PreventGradientMessage(value string) PreventGradientAttr {
+// value: If true, "a" is transposed before multiplication.
+// If not specified, defaults to false
+func MatMulTransposeA(value bool) MatMulAttr {
return func(m optionalAttr) {
- m["message"] = value
+ m["transpose_a"] = value
}
}
-// An identity op that triggers an error if a gradient is requested.
-//
-// When executed in a graph, this op outputs its input tensor as-is.
+// MatMulTransposeB sets the optional transpose_b attribute to value.
//
-// When building ops to compute gradients, the TensorFlow gradient system
-// will return an error when trying to lookup the gradient of this op,
-// because no gradient must ever be registered for this function. This
-// op exists to prevent subtle bugs from silently returning unimplemented
-// gradients in some corner cases.
+// value: If true, "b" is transposed before multiplication.
+// If not specified, defaults to false
+func MatMulTransposeB(value bool) MatMulAttr {
+ return func(m optionalAttr) {
+ m["transpose_b"] = value
+ }
+}
+
+// Multiply the matrix "a" by the matrix "b".
//
-// Arguments:
-// input: any tensor.
+// The inputs must be two-dimensional matrices and the inner dimension of
+// "a" (after being transposed if transpose_a is true) must match the
+// outer dimension of "b" (after being transposed if transpose_b is
+// true).
//
-// Returns the same input tensor.
-func PreventGradient(scope *Scope, input tf.Output, optional ...PreventGradientAttr) (output tf.Output) {
+// *Note*: The default kernel implementation for MatMul on GPUs uses
+// cuBLAS.
+func MatMul(scope *Scope, a tf.Output, b tf.Output, optional ...MatMulAttr) (product tf.Output) {
if scope.Err() != nil {
return
}
@@ -1595,9 +1231,9 @@ func PreventGradient(scope *Scope, input tf.Output, optional ...PreventGradientA
a(attrs)
}
opspec := tf.OpSpec{
- Type: "PreventGradient",
+ Type: "MatMul",
Input: []tf.Input{
- input,
+ a, b,
},
Attrs: attrs,
}
@@ -1605,230 +1241,213 @@ func PreventGradient(scope *Scope, input tf.Output, optional ...PreventGradientA
return op.Output(0)
}
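A sketch of the optional-attribute pattern the Attr functions above implement (scope `s` assumed):

```go
a := op.Const(s, [][]float32{{1, 2}, {3, 4}})
b := op.Const(s, [][]float32{{5, 6}, {7, 8}})

product := op.MatMul(s, a, b) // => [[19 22] [43 50]]

// Transpose "b" before multiplying: a x b^T => [[17 23] [39 53]]
productT := op.MatMul(s, a, b, op.MatMulTransposeB(true))
```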
-// Stops gradient computation.
+// Selects elements from `x` or `y`, depending on `condition`.
//
-// When executed in a graph, this op outputs its input tensor as-is.
+// The `x`, and `y` tensors must all have the same shape, and the
+// output will also have that shape.
//
-// When building ops to compute gradients, this op prevents the contribution of
-// its inputs to be taken into account. Normally, the gradient generator adds ops
-// to a graph to compute the derivatives of a specified 'loss' by recursively
-// finding out inputs that contributed to its computation. If you insert this op
-// in the graph it inputs are masked from the gradient generator. They are not
-// taken into account for computing gradients.
+// The `condition` tensor must be a scalar if `x` and `y` are scalars.
+// If `x` and `y` are vectors or higher rank, then `condition` must be either a
+// scalar, a vector with size matching the first dimension of `x`, or must have
+// the same shape as `x`.
//
-// This is useful any time you want to compute a value with TensorFlow but need
-// to pretend that the value was a constant. Some examples include:
+// The `condition` tensor acts as a mask that chooses, based on the value at each
+// element, whether the corresponding element / row in the output should be
+// taken from `x` (if true) or `y` (if false).
//
-// * The *EM* algorithm where the *M-step* should not involve backpropagation
-// through the output of the *E-step*.
-// * Contrastive divergence training of Boltzmann machines where, when
-// differentiating the energy function, the training must not backpropagate
-// through the graph that generated the samples from the model.
-// * Adversarial training, where no backprop should happen through the adversarial
-// example generation process.
-func StopGradient(scope *Scope, input tf.Output) (output tf.Output) {
+// If `condition` is a vector and `x` and `y` are higher rank matrices, then
+// it chooses which row (outer dimension) to copy from `x` and `y`.
+// If `condition` has the same shape as `x` and `y`, then it chooses which
+// element to copy from `x` and `y`.
+//
+// For example:
+//
+// ```python
+// # 'condition' tensor is [[True, False]
+// # [False, True]]
+// # 't' is [[1, 2],
+// # [3, 4]]
+// # 'e' is [[5, 6],
+// # [7, 8]]
+// select(condition, t, e) # => [[1, 6], [7, 4]]
+//
+//
+// # 'condition' tensor is [True, False]
+// # 't' is [[1, 2],
+// # [3, 4]]
+// # 'e' is [[5, 6],
+// # [7, 8]]
+// select(condition, t, e) ==> [[1, 2],
+// [7, 8]]
+//
+// ```
+//
+// Arguments:
+//
+// x: A `Tensor` which may have the same shape as `condition`.
+// If `condition` is rank 1, `x` may have higher rank,
+// but its first dimension must match the size of `condition`.
+// y: A `Tensor` with the same type and shape as `x`.
+//
+// Returns A `Tensor` with the same type and shape as `x` and `y`.
+func Select(scope *Scope, condition tf.Output, x tf.Output, y tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "StopGradient",
+ Type: "Select",
Input: []tf.Input{
- input,
+ condition, x, y,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Gather slices from `params` into a Tensor with shape specified by `indices`.
-//
-// `indices` is an K-dimensional integer tensor, best thought of as a
-// (K-1)-dimensional tensor of indices into `params`, where each element defines a
-// slice of `params`:
-//
-// output[i_0, ..., i_{K-2}] = params[indices[i0, ..., i_{K-2}]]
-//
-// Whereas in @{tf.gather} `indices` defines slices into the first
-// dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the
-// first `N` dimensions of `params`, where `N = indices.shape[-1]`.
-//
-// The last dimension of `indices` can be at most the rank of
-// `params`:
-//
-// indices.shape[-1] <= params.rank
-//
-// The last dimension of `indices` corresponds to elements
-// (if `indices.shape[-1] == params.rank`) or slices
-// (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]`
-// of `params`. The output tensor has shape
-//
-// indices.shape[:-1] + params.shape[indices.shape[-1]:]
-//
-// Some examples below.
+// Returns the truth value of x OR y element-wise.
//
-// Simple indexing into a matrix:
+// *NOTE*: `LogicalOr` supports broadcasting. More about broadcasting
+// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+func LogicalOr(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "LogicalOr",
+ Input: []tf.Input{
+ x, y,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
+// Compute the regularized incomplete beta integral \\(I_x(a, b)\\).
//
-// ```python
-// indices = [[0, 0], [1, 1]]
-// params = [['a', 'b'], ['c', 'd']]
-// output = ['a', 'd']
-// ```
+// The regularized incomplete beta integral is defined as:
//
-// Slice indexing into a matrix:
//
-// ```python
-// indices = [[1], [0]]
-// params = [['a', 'b'], ['c', 'd']]
-// output = [['c', 'd'], ['a', 'b']]
-// ```
+// \\(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\\)
//
-// Indexing into a 3-tensor:
+// where
//
-// ```python
-// indices = [[1]]
-// params = [[['a0', 'b0'], ['c0', 'd0']],
-// [['a1', 'b1'], ['c1', 'd1']]]
-// output = [[['a1', 'b1'], ['c1', 'd1']]]
//
+// \\(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\\)
//
-// indices = [[0, 1], [1, 0]]
-// params = [[['a0', 'b0'], ['c0', 'd0']],
-// [['a1', 'b1'], ['c1', 'd1']]]
-// output = [['c0', 'd0'], ['a1', 'b1']]
//
+// is the incomplete beta function and \\(B(a, b)\\) is the *complete*
+// beta function.
+func Betainc(scope *Scope, a tf.Output, b tf.Output, x tf.Output) (z tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "Betainc",
+ Input: []tf.Input{
+ a, b, x,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
+// Computes the sum along sparse segments of a tensor divided by the sqrt of N.
//
-// indices = [[0, 0, 1], [1, 0, 1]]
-// params = [[['a0', 'b0'], ['c0', 'd0']],
-// [['a1', 'b1'], ['c1', 'd1']]]
-// output = ['b0', 'b1']
-// ```
+// N is the size of the segment being reduced.
//
-// Batched indexing into a matrix:
+// Like `SparseSegmentSqrtN`, but allows missing ids in `segment_ids`. If an id is
+// missing, the `output` tensor at that position will be zeroed.
//
-// ```python
-// indices = [[[0, 0]], [[0, 1]]]
-// params = [['a', 'b'], ['c', 'd']]
-// output = [['a'], ['b']]
-// ```
+// Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
+// segments.
//
-// Batched slice indexing into a matrix:
+// Arguments:
//
-// ```python
-// indices = [[[1]], [[0]]]
-// params = [['a', 'b'], ['c', 'd']]
-// output = [[['c', 'd']], [['a', 'b']]]
-// ```
+// indices: A 1-D tensor. Has same rank as `segment_ids`.
+// segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
+// num_segments: Should equal the number of distinct segment IDs.
//
-// Batched indexing into a 3-tensor:
+// Returns Has same shape as data, except for dimension 0 which
+// has size `k`, the number of segments.
+func SparseSegmentSqrtNWithNumSegments(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "SparseSegmentSqrtNWithNumSegments",
+ Input: []tf.Input{
+ data, indices, segment_ids, num_segments,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
+// Compute the upper regularized incomplete Gamma function `Q(a, x)`.
//
-// ```python
-// indices = [[[1]], [[0]]]
-// params = [[['a0', 'b0'], ['c0', 'd0']],
-// [['a1', 'b1'], ['c1', 'd1']]]
-// output = [[[['a1', 'b1'], ['c1', 'd1']]],
-// [[['a0', 'b0'], ['c0', 'd0']]]]
+// The upper regularized incomplete Gamma function is defined as:
//
-// indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
-// params = [[['a0', 'b0'], ['c0', 'd0']],
-// [['a1', 'b1'], ['c1', 'd1']]]
-// output = [[['c0', 'd0'], ['a1', 'b1']],
-// [['a0', 'b0'], ['c1', 'd1']]]
+// \\(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\\)
//
+// where
//
-// indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
-// params = [[['a0', 'b0'], ['c0', 'd0']],
-// [['a1', 'b1'], ['c1', 'd1']]]
-// output = [['b0', 'b1'], ['d0', 'c1']]
-// ```
+// \\(Gamma(a, x) = \int_{x}^{\infty} t^{a-1} \exp(-t) dt\\)
//
-// Arguments:
-// params: The tensor from which to gather values.
-// indices: Index tensor.
+// is the upper incomplete Gamma function.
//
-// Returns Values from `params` gathered from indices given by `indices`, with
-// shape `indices.shape[:-1] + params.shape[indices.shape[-1]:]`.
-func GatherNd(scope *Scope, params tf.Output, indices tf.Output) (output tf.Output) {
+// Note, above `P(a, x)` (`Igamma`) is the lower regularized incomplete
+// Gamma function.
+func Igammac(scope *Scope, a tf.Output, x tf.Output) (z tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "GatherNd",
+ Type: "Igammac",
Input: []tf.Input{
- params, indices,
+ a, x,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// EditDistanceAttr is an optional argument to EditDistance.
-type EditDistanceAttr func(optionalAttr)
+// FakeQuantWithMinMaxVarsGradientAttr is an optional argument to FakeQuantWithMinMaxVarsGradient.
+type FakeQuantWithMinMaxVarsGradientAttr func(optionalAttr)
-// EditDistanceNormalize sets the optional normalize attribute to value.
-//
-// value: boolean (if true, edit distances are normalized by length of truth).
+// FakeQuantWithMinMaxVarsGradientNumBits sets the optional num_bits attribute to value.
//
-// The output is:
-// If not specified, defaults to true
-func EditDistanceNormalize(value bool) EditDistanceAttr {
+// value: The bitwidth of the quantization; between 2 and 8, inclusive.
+// If not specified, defaults to 8
+func FakeQuantWithMinMaxVarsGradientNumBits(value int64) FakeQuantWithMinMaxVarsGradientAttr {
return func(m optionalAttr) {
- m["normalize"] = value
+ m["num_bits"] = value
}
}
-// Computes the (possibly normalized) Levenshtein Edit Distance.
-//
-// The inputs are variable-length sequences provided by SparseTensors
-// (hypothesis_indices, hypothesis_values, hypothesis_shape)
-// and
-// (truth_indices, truth_values, truth_shape).
+// FakeQuantWithMinMaxVarsGradientNarrowRange sets the optional narrow_range attribute to value.
//
-// The inputs are:
+// value: Whether to quantize into 2^num_bits - 1 distinct values.
+// If not specified, defaults to false
+func FakeQuantWithMinMaxVarsGradientNarrowRange(value bool) FakeQuantWithMinMaxVarsGradientAttr {
+ return func(m optionalAttr) {
+ m["narrow_range"] = value
+ }
+}
+
+// Compute gradients for a FakeQuantWithMinMaxVars operation.
//
// Arguments:
-// hypothesis_indices: The indices of the hypothesis list SparseTensor.
-// This is an N x R int64 matrix.
-// hypothesis_values: The values of the hypothesis list SparseTensor.
-// This is an N-length vector.
-// hypothesis_shape: The shape of the hypothesis list SparseTensor.
-// This is an R-length vector.
-// truth_indices: The indices of the truth list SparseTensor.
-// This is an M x R int64 matrix.
-// truth_values: The values of the truth list SparseTensor.
-// This is an M-length vector.
-// truth_shape: truth indices, vector.
-//
-// Returns A dense float tensor with rank R - 1.
+// gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation.
+// inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation.
+// min, max: Quantization interval, scalar floats.
//
-// For the example input:
//
-// // hypothesis represents a 2x1 matrix with variable-length values:
-// // (0,0) = ["a"]
-// // (1,0) = ["b"]
-// hypothesis_indices = [[0, 0, 0],
-// [1, 0, 0]]
-// hypothesis_values = ["a", "b"]
-// hypothesis_shape = [2, 1, 1]
//
-// // truth represents a 2x2 matrix with variable-length values:
-// // (0,0) = []
-// // (0,1) = ["a"]
-// // (1,0) = ["b", "c"]
-// // (1,1) = ["a"]
-// truth_indices = [[0, 1, 0],
-// [1, 0, 0],
-// [1, 0, 1],
-// [1, 1, 0]]
-// truth_values = ["a", "b", "c", "a"]
-// truth_shape = [2, 2, 2]
-// normalize = true
-//
-// The output will be:
-//
-// // output is a 2x2 matrix with edit distances normalized by truth lengths.
-// output = [[inf, 1.0], // (0,0): no truth, (0,1): no hypothesis
-// [0.5, 1.0]] // (1,0): addition, (1,1): no hypothesis
-func EditDistance(scope *Scope, hypothesis_indices tf.Output, hypothesis_values tf.Output, hypothesis_shape tf.Output, truth_indices tf.Output, truth_values tf.Output, truth_shape tf.Output, optional ...EditDistanceAttr) (output tf.Output) {
+// Returns Backpropagated gradients w.r.t. inputs:
+// `gradients * (inputs >= min && inputs <= max)`. Backpropagated gradients w.r.t. min parameter:
+// `sum(gradients * (inputs < min))`. Backpropagated gradients w.r.t. max parameter:
+// `sum(gradients * (inputs > max))`.
+func FakeQuantWithMinMaxVarsGradient(scope *Scope, gradients tf.Output, inputs tf.Output, min tf.Output, max tf.Output, optional ...FakeQuantWithMinMaxVarsGradientAttr) (backprops_wrt_input tf.Output, backprop_wrt_min tf.Output, backprop_wrt_max tf.Output) {
if scope.Err() != nil {
return
}
@@ -1837,181 +1456,191 @@ func EditDistance(scope *Scope, hypothesis_indices tf.Output, hypothesis_values
a(attrs)
}
opspec := tf.OpSpec{
- Type: "EditDistance",
+ Type: "FakeQuantWithMinMaxVarsGradient",
Input: []tf.Input{
- hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape,
+ gradients, inputs, min, max,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1), op.Output(2)
}
-// Returns a batched matrix tensor with new batched diagonal values.
+// LogUniformCandidateSamplerAttr is an optional argument to LogUniformCandidateSampler.
+type LogUniformCandidateSamplerAttr func(optionalAttr)
+
+// LogUniformCandidateSamplerSeed sets the optional seed attribute to value.
//
-// Given `input` and `diagonal`, this operation returns a tensor with the
-// same shape and values as `input`, except for the main diagonal of the
-// innermost matrices. These will be overwritten by the values in `diagonal`.
+// value: If either seed or seed2 are set to be non-zero, the random number
+// generator is seeded by the given seed. Otherwise, it is seeded by a
+// random seed.
+// If not specified, defaults to 0
+func LogUniformCandidateSamplerSeed(value int64) LogUniformCandidateSamplerAttr {
+ return func(m optionalAttr) {
+ m["seed"] = value
+ }
+}
+
+// LogUniformCandidateSamplerSeed2 sets the optional seed2 attribute to value.
//
-// The output is computed as follows:
+// value: A second seed to avoid seed collision.
+// If not specified, defaults to 0
+func LogUniformCandidateSamplerSeed2(value int64) LogUniformCandidateSamplerAttr {
+ return func(m optionalAttr) {
+ m["seed2"] = value
+ }
+}
+
+// Generates labels for candidate sampling with a log-uniform distribution.
//
-// Assume `input` has `k+1` dimensions `[I, J, K, ..., M, N]` and `diagonal` has
-// `k` dimensions `[I, J, K, ..., min(M, N)]`. Then the output is a
-// tensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where:
+// See explanations of candidate sampling and the data formats at
+// go/candidate-sampling.
//
-// * `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`.
-// * `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`.
+// For each batch, this op picks a single set of sampled candidate labels.
+//
+// The advantages of sampling candidates per-batch are simplicity and the
+// possibility of efficient dense matrix multiplication. The disadvantage is that
+// the sampled candidates must be chosen independently of the context and of the
+// true labels.
//
// Arguments:
-// input: Rank `k+1`, where `k >= 1`.
-// diagonal: Rank `k`, where `k >= 1`.
+// true_classes: A batch_size * num_true matrix, in which each row contains the
+// IDs of the num_true target_classes in the corresponding original label.
+// num_true: Number of true labels per context.
+// num_sampled: Number of candidates to randomly sample.
+// unique: If unique is true, we sample with rejection, so that all sampled
+// candidates in a batch are unique. This requires some approximation to
+// estimate the post-rejection sampling probabilities.
+// range_max: The sampler will sample integers from the interval [0, range_max).
//
-// Returns Rank `k+1`, with `output.shape = input.shape`.
-func MatrixSetDiag(scope *Scope, input tf.Output, diagonal tf.Output) (output tf.Output) {
+// Returns A vector of length num_sampled, in which each element is
+// the ID of a sampled candidate. A batch_size * num_true matrix, representing
+// the number of times each candidate is expected to occur in a batch
+// of sampled candidates. If unique=true, then this is a probability. A vector
+// of length num_sampled, for each sampled candidate representing the number
+// of times the candidate is expected to occur in a batch of sampled
+// candidates. If unique=true, then this is a probability.
+func LogUniformCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...LogUniformCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique, "range_max": range_max}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "MatrixSetDiag",
+ Type: "LogUniformCandidateSampler",
Input: []tf.Input{
- input, diagonal,
+ true_classes,
},
+ Attrs: attrs,
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0), op.Output(1), op.Output(2)
+}
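A sketch showing how required attributes surface as plain parameters while the three outputs come back as separate values (scope `s` assumed; the class IDs are illustrative):

```go
trueClasses := op.Const(s, [][]int64{{12}, {7}}) // batch_size=2, num_true=1
sampled, trueExpected, sampledExpected := op.LogUniformCandidateSampler(
	s, trueClasses,
	1,    // num_true
	5,    // num_sampled
	true, // unique: rejection sampling, all candidates distinct
	1000, // range_max: IDs drawn from [0, 1000)
	op.LogUniformCandidateSamplerSeed(42))
_, _, _ = sampled, trueExpected, sampledExpected
```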
+
+// ApproximateEqualAttr is an optional argument to ApproximateEqual.
+type ApproximateEqualAttr func(optionalAttr)
+
+// ApproximateEqualTolerance sets the optional tolerance attribute to value.
+// If not specified, defaults to 1e-05
+func ApproximateEqualTolerance(value float32) ApproximateEqualAttr {
+ return func(m optionalAttr) {
+ m["tolerance"] = value
+ }
+}
+
+// Returns the truth value of abs(x-y) < tolerance element-wise.
+func ApproximateEqual(scope *Scope, x tf.Output, y tf.Output, optional ...ApproximateEqualAttr) (z tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
+ opspec := tf.OpSpec{
+ Type: "ApproximateEqual",
+ Input: []tf.Input{
+ x, y,
+ },
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
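A sketch (scope `s` assumed); the default tolerance of 1e-05 can be widened through the attr setter:

```go
x := op.Const(s, []float32{1.0, 2.0})
y := op.Const(s, []float32{1.0004, 2.1})
near := op.ApproximateEqual(s, x, y, op.ApproximateEqualTolerance(1e-3))
// => [true false]
```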
-// Returns the diagonal part of the tensor.
-//
-// This operation returns a tensor with the `diagonal` part
-// of the `input`. The `diagonal` part is computed as follows:
-//
-// Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a
-// tensor of rank `k` with dimensions `[D1,..., Dk]` where:
-//
-// `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.
-//
-// For example:
-//
-// ```
-// # 'input' is [[1, 0, 0, 0]
-// [0, 2, 0, 0]
-// [0, 0, 3, 0]
-// [0, 0, 0, 4]]
-//
-// tf.diag_part(input) ==> [1, 2, 3, 4]
-// ```
+// Returns x / y element-wise.
//
-// Arguments:
-// input: Rank k tensor where k is even and not zero.
+// *NOTE*: `Div` supports broadcasting. More about broadcasting
+// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+func Div(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "Div",
+ Input: []tf.Input{
+ x, y,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
+// Returns x * y element-wise.
//
-// Returns The extracted diagonal.
-func DiagPart(scope *Scope, input tf.Output) (diagonal tf.Output) {
+// *NOTE*: `Multiply` supports broadcasting. More about broadcasting
+// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+func Mul(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "DiagPart",
+ Type: "Mul",
Input: []tf.Input{
- input,
+ x, y,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// DequantizeAttr is an optional argument to Dequantize.
-type DequantizeAttr func(optionalAttr)
+// SparseReduceSumSparseAttr is an optional argument to SparseReduceSumSparse.
+type SparseReduceSumSparseAttr func(optionalAttr)
-// DequantizeMode sets the optional mode attribute to value.
-// If not specified, defaults to "MIN_COMBINED"
-func DequantizeMode(value string) DequantizeAttr {
+// SparseReduceSumSparseKeepDims sets the optional keep_dims attribute to value.
+//
+// value: If true, retain reduced dimensions with length 1.
+// If not specified, defaults to false
+func SparseReduceSumSparseKeepDims(value bool) SparseReduceSumSparseAttr {
return func(m optionalAttr) {
- m["mode"] = value
+ m["keep_dims"] = value
}
}
-// Dequantize the 'input' tensor into a float Tensor.
-//
-// [min_range, max_range] are scalar floats that specify the range for
-// the 'input' data. The 'mode' attribute controls exactly which calculations are
-// used to convert the float values to their quantized equivalents.
-//
-// In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
-//
-// ```
-// if T == qint8, in[i] += (range(T) + 1)/ 2.0
-// out[i] = min_range + (in[i]* (max_range - min_range) / range(T))
-// ```
-// here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`
-//
-// *MIN_COMBINED Mode Example*
-//
-// If the input comes from a QuantizedRelu6, the output type is
-// quint8 (range of 0-255) but the possible range of QuantizedRelu6 is
-// 0-6. The min_range and max_range values are therefore 0.0 and 6.0.
-// Dequantize on quint8 will take each value, cast to float, and multiply
-// by 6 / 255.
-// Note that if quantizedtype is qint8, the operation will additionally add
-// each value by 128 prior to casting.
-//
-// If the mode is 'MIN_FIRST', then this approach is used:
-//
-// ```c++
-// num_discrete_values = 1 << (# of bits in T)
-// range_adjust = num_discrete_values / (num_discrete_values - 1)
-// range = (range_max - range_min) * range_adjust
-// range_scale = range / num_discrete_values
-// const double offset_input = static_cast<double>(input) - lowest_quantized;
-// result = range_min + ((input - numeric_limits<T>::min()) * range_scale)
-// ```
-//
-// *SCALED mode Example*
-//
-// `SCALED` mode matches the quantization approach used in
-// `QuantizeAndDequantize{V2|V3}`.
-//
-// If the mode is `SCALED`, we do not use the full range of the output type,
-// choosing to elide the lowest possible value for symmetry (e.g., output range is
-// -127 to 127, not -128 to 127 for signed 8 bit quantization), so that 0.0 maps to
-// 0.
-//
-// We first find the range of values in our tensor. The
-// range we use is always centered on 0, so we find m such that
-// ```c++
-// m = max(abs(input_min), abs(input_max))
-// ```
-//
-// Our input tensor range is then `[-m, m]`.
-//
-// Next, we choose our fixed-point quantization buckets, `[min_fixed, max_fixed]`.
-// If T is signed, this is
-// ```
-// num_bits = sizeof(T) * 8
-// [min_fixed, max_fixed] =
-// [-(1 << (num_bits - 1) - 1), (1 << (num_bits - 1)) - 1]
-// ```
+// Computes the sum of elements across dimensions of a SparseTensor.
//
-// Otherwise, if T is unsigned, the fixed-point range is
-// ```
-// [min_fixed, max_fixed] = [0, (1 << num_bits) - 1]
-// ```
+// This Op takes a SparseTensor and is the sparse counterpart to
+// `tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a
+// SparseTensor.
//
-// From this we compute our scaling factor, s:
-// ```c++
-// s = (2 * m) / (max_fixed - min_fixed)
-// ```
+// Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
+// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+// `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
+// with length 1.
//
-// Now we can dequantize the elements of our tensor:
-// ```c++
-// result = input * s
-// ```
+// If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
+// with a single element is returned. Additionally, the axes can be negative,
+// which are interpreted according to the indexing rules in Python.
//
// Arguments:
-//
-// min_range: The minimum scalar value possibly produced for the input.
-// max_range: The maximum scalar value possibly produced for the input.
-func Dequantize(scope *Scope, input tf.Output, min_range tf.Output, max_range tf.Output, optional ...DequantizeAttr) (output tf.Output) {
+// input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a
+// SparseTensor, possibly not in canonical ordering.
+// input_values: 1-D. `N` non-empty values corresponding to `input_indices`.
+// input_shape: 1-D. Shape of the input SparseTensor.
+// reduction_axes: 1-D. Length-`K` vector containing the reduction axes.
+func SparseReduceSumSparse(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output, reduction_axes tf.Output, optional ...SparseReduceSumSparseAttr) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
if scope.Err() != nil {
return
}
@@ -2020,305 +1649,298 @@ func Dequantize(scope *Scope, input tf.Output, min_range tf.Output, max_range tf
a(attrs)
}
opspec := tf.OpSpec{
- Type: "Dequantize",
+ Type: "SparseReduceSumSparse",
Input: []tf.Input{
- input, min_range, max_range,
+ input_indices, input_values, input_shape, reduction_axes,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1), op.Output(2)
}
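A sketch reducing a 2x3 SparseTensor with entries (0,0)=1, (0,2)=2, (1,1)=3 along axis 1 (scope `s` assumed):

```go
inIdx := op.Const(s, [][]int64{{0, 0}, {0, 2}, {1, 1}})
inVals := op.Const(s, []int32{1, 2, 3})
inShape := op.Const(s, []int64{2, 3})
axes := op.Const(s, []int32{1})
outIdx, outVals, outShape := op.SparseReduceSumSparse(s, inIdx, inVals, inShape, axes)
// Dense equivalent: reduce_sum([[1 0 2] [0 3 0]], axis=1) => [3 3],
// returned here in sparse form as (indices, values, shape).
_, _, _ = outIdx, outVals, outShape
```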
-// Returns a tensor of zeros with the same shape and type as x.
+// BiasAddAttr is an optional argument to BiasAdd.
+type BiasAddAttr func(optionalAttr)
+
+// BiasAddDataFormat sets the optional data_format attribute to value.
+//
+// value: Specify the data format of the input and output data. With the
+// default format "NHWC", the bias tensor will be added to the last dimension
+// of the value tensor.
+// Alternatively, the format could be "NCHW", the data storage order of:
+// [batch, in_channels, in_height, in_width].
+// The tensor will be added to "in_channels", the third-to-the-last
+// dimension.
+// If not specified, defaults to "NHWC"
+func BiasAddDataFormat(value string) BiasAddAttr {
+ return func(m optionalAttr) {
+ m["data_format"] = value
+ }
+}
+
+// Adds `bias` to `value`.
+//
+// This is a special case of `tf.add` where `bias` is restricted to be 1-D.
+// Broadcasting is supported, so `value` may have any number of dimensions.
//
// Arguments:
-// x: a tensor of type T.
+// value: Any number of dimensions.
+// bias: 1-D with size the last dimension of `value`.
//
-// Returns a tensor of the same shape and type as x but filled with zeros.
-func ZerosLike(scope *Scope, x tf.Output) (y tf.Output) {
+// Returns Broadcasted sum of `value` and `bias`.
+func BiasAdd(scope *Scope, value tf.Output, bias tf.Output, optional ...BiasAddAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "ZerosLike",
+ Type: "BiasAdd",
Input: []tf.Input{
- x,
+ value, bias,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
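A sketch (scope `s` assumed); the commented line shows the NCHW variant, where the bias follows the channel dimension instead of the last one (`value4` and `chanBias` are hypothetical):

```go
value := op.Const(s, [][]float32{{1, 2, 3}, {4, 5, 6}}) // shape [2, 3]
bias := op.Const(s, []float32{10, 20, 30})              // size of last dim
out := op.BiasAdd(s, value, bias) // => [[11 22 33] [14 25 36]]

// For [batch, channels, height, width] inputs:
// out4 := op.BiasAdd(s, value4, chanBias, op.BiasAddDataFormat("NCHW"))
```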
-// Gives a guarantee to the TF runtime that the input tensor is a constant.
+// BiasAddGradAttr is an optional argument to BiasAddGrad.
+type BiasAddGradAttr func(optionalAttr)
+
+// BiasAddGradDataFormat sets the optional data_format attribute to value.
//
-// The runtime is then free to make optimizations based on this.
+// value: Specify the data format of the input and output data. With the
+// default format "NHWC", the bias tensor will be added to the last dimension
+// of the value tensor.
+// Alternatively, the format could be "NCHW", the data storage order of:
+// [batch, in_channels, in_height, in_width].
+// The tensor will be added to "in_channels", the third-to-the-last
+// dimension.
+// If not specified, defaults to "NHWC"
+func BiasAddGradDataFormat(value string) BiasAddGradAttr {
+ return func(m optionalAttr) {
+ m["data_format"] = value
+ }
+}
+
+// The backward operation for "BiasAdd" on the "bias" tensor.
//
-// Only accepts value typed tensors as inputs and rejects resource variable handles
-// as input.
+// It accumulates all the values from out_backprop into the feature dimension.
+// For NHWC data format, the feature dimension is the last. For NCHW data format,
+// the feature dimension is the third-to-last.
//
-// Returns the input tensor without modification.
-func GuaranteeConst(scope *Scope, input tf.Output) (output tf.Output) {
+// Arguments:
+// out_backprop: Any number of dimensions.
+//
+// Returns 1-D with size the feature dimension of `out_backprop`.
+func BiasAddGrad(scope *Scope, out_backprop tf.Output, optional ...BiasAddGradAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "GuaranteeConst",
+ Type: "BiasAddGrad",
Input: []tf.Input{
- input,
+ out_backprop,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Splits a tensor into `num_split` tensors along one dimension.
-//
-// Arguments:
-// value: The tensor to split.
-// size_splits: list containing the sizes of each output tensor along the split
-// dimension. Must sum to the dimension of value along split_dim.
-// Can contain one -1 indicating that dimension is to be inferred.
-// axis: 0-D. The dimension along which to split. Must be in the range
-// `[-rank(value), rank(value))`.
-//
+// Returns x + y element-wise.
//
-// Returns Tensors whose shape matches that of `value`
-// except along `axis`, where their sizes are
-// `size_splits[i]`.
-func SplitV(scope *Scope, value tf.Output, size_splits tf.Output, axis tf.Output, num_split int64) (output []tf.Output) {
+// *NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting
+// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+func AddV2(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"num_split": num_split}
opspec := tf.OpSpec{
- Type: "SplitV",
+ Type: "AddV2",
Input: []tf.Input{
- value, size_splits, axis,
+ x, y,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
- if scope.Err() != nil {
- return
- }
- var idx int
- var err error
- if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
- scope.UpdateErr("SplitV", err)
- return
- }
- return output
+ return op.Output(0)
}
-// Splits a tensor into `num_split` tensors along one dimension.
-//
-// Arguments:
-// axis: 0-D. The dimension along which to split. Must be in the range
-// `[-rank(value), rank(value))`.
-// value: The tensor to split.
-// num_split: The number of ways to split. Must evenly divide
-// `value.shape[split_dim]`.
+// Returns x + y element-wise.
//
-// Returns They are identically shaped tensors, whose shape matches that of `value`
-// except along `axis`, where their sizes are
-// `values.shape[split_dim] / num_split`.
-func Split(scope *Scope, axis tf.Output, value tf.Output, num_split int64) (output []tf.Output) {
+// *NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting
+// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+func Add(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"num_split": num_split}
opspec := tf.OpSpec{
- Type: "Split",
+ Type: "Add",
Input: []tf.Input{
- axis, value,
+ x, y,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
- if scope.Err() != nil {
- return
- }
- var idx int
- var err error
- if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
- scope.UpdateErr("Split", err)
- return
+ return op.Output(0)
+}
+
+// NthElementAttr is an optional argument to NthElement.
+type NthElementAttr func(optionalAttr)
+
+// NthElementReverse sets the optional reverse attribute to value.
+//
+// value: When set to True, find the nth-largest value in the vector instead
+// of the nth-smallest.
+// If not specified, defaults to false
+func NthElementReverse(value bool) NthElementAttr {
+ return func(m optionalAttr) {
+ m["reverse"] = value
}
- return output
}
-// Computes offsets of concat inputs within its output.
+// Finds values of the `n`-th order statistic for the last dimension.
//
-// For example:
+// If the input is a vector (rank-1), finds the entry which is the nth-smallest
+// value in the vector and outputs its value as a scalar tensor.
//
-// ```
-// # 'x' is [2, 2, 7]
-// # 'y' is [2, 3, 7]
-// # 'z' is [2, 5, 7]
-// concat_offset(2, [x, y, z]) => [0, 0, 0], [0, 2, 0], [0, 5, 0]
-// ```
+// For matrices (resp. higher rank input), computes the entry which is the
+// nth-smallest value in each row (resp. vector along the last dimension). Thus,
//
-// This is typically used by gradient computations for a concat operation.
+// values.shape = input.shape[:-1]
//
// Arguments:
-// concat_dim: The dimension along which to concatenate.
-// shape: The `N` int32 vectors representing shape of tensors being concatenated.
+// input: 1-D or higher with last dimension at least `n+1`.
+// n: 0-D. Position of sorted vector to select along the last dimension (along
+// each row for matrices). Valid range of n is `[0, input.shape[-1])`
//
-// Returns The `N` int32 vectors representing the starting offset
-// of input tensors within the concatenated output.
-func ConcatOffset(scope *Scope, concat_dim tf.Output, shape []tf.Output) (offset []tf.Output) {
+// Returns The `n`-th order statistic along each last dimensional slice.
+func NthElement(scope *Scope, input tf.Output, n tf.Output, optional ...NthElementAttr) (values tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "ConcatOffset",
+ Type: "NthElement",
Input: []tf.Input{
- concat_dim, tf.OutputList(shape),
+ input, n,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
- if scope.Err() != nil {
- return
- }
- var idx int
- var err error
- if offset, idx, err = makeOutputList(op, idx, "offset"); err != nil {
- scope.UpdateErr("ConcatOffset", err)
- return
- }
- return offset
+ return op.Output(0)
}
-// Writes a `Summary` protocol buffer with a histogram.
+// Computes the Max along segments of a tensor.
//
-// The generated
-// [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
-// has one summary value containing a histogram for `values`.
+// Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
+// segments.
//
-// This op reports an `InvalidArgument` error if any value is not finite.
+// This operator is similar to the [unsorted segment sum operator](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
+// Instead of computing the sum over segments, it computes the maximum
+// such that:
+//
+// \\(output_i = \max_j data_j\\) where max is over `j` such
+// that `segment_ids[j] == i`.
+//
+// If the maximum is empty for a given segment ID `i`, it outputs the smallest
+// possible value for the specific numeric type,
+// `output[i] = numeric_limits<T>::min()`.
+//
+// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+// <img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentMax.png" alt>
+// </div>
//
// Arguments:
-// writer: A handle to a summary writer.
-// step: The step to write the summary for.
-// tag: Scalar. Tag to use for the `Summary.Value`.
-// values: Any shape. Values to use to build the histogram.
//
-// Returns the created operation.
-func WriteHistogramSummary(scope *Scope, writer tf.Output, step tf.Output, tag tf.Output, values tf.Output) (o *tf.Operation) {
+// segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
+// first dimension.
+//
+//
+// Returns Has same shape as data, except for dimension 0 which
+// has size `num_segments`.
+func UnsortedSegmentMax(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "WriteHistogramSummary",
+ Type: "UnsortedSegmentMax",
Input: []tf.Input{
- writer, step, tag, values,
+ data, segment_ids, num_segments,
},
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// Concatenates tensors along one dimension.
-//
-// Arguments:
-// concat_dim: 0-D. The dimension along which to concatenate. Must be in the
-// range [0, rank(values)).
-// values: The `N` Tensors to concatenate. Their ranks and types must match,
-// and their sizes must match in all dimensions except `concat_dim`.
-//
-// Returns A `Tensor` with the concatenation of values stacked along the
-// `concat_dim` dimension. This tensor's shape matches that of `values` except
-// in `concat_dim` where it has the sum of the sizes.
-func Concat(scope *Scope, concat_dim tf.Output, values []tf.Output) (output tf.Output) {
+// Computes exponential of x element-wise. \\(y = e^x\\).
+func Exp(scope *Scope, x tf.Output) (y tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Concat",
+ Type: "Exp",
Input: []tf.Input{
- concat_dim, tf.OutputList(values),
+ x,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Concatenates a list of `N` tensors along the first dimension.
-//
-// The input tensors are all required to have size 1 in the first dimension.
-//
-// For example:
-//
-// ```
-// # 'x' is [[1, 4]]
-// # 'y' is [[2, 5]]
-// # 'z' is [[3, 6]]
-// parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim.
-// ```
-//
-// The difference between concat and parallel_concat is that concat requires all
-// of the inputs be computed before the operation will begin but doesn't require
-// that the input shapes be known during graph construction. Parallel concat
-// will copy pieces of the input into the output as they become available; in
-// some situations this can provide a performance benefit.
+// Returns an element-wise indication of the sign of a number.
//
-// Arguments:
-// values: Tensors to be concatenated. All must have size 1 in the first dimension
-// and same shape.
-// shape: the final shape of the result; should be equal to the shapes of any input
-// but with the number of input values in the first dimension.
+// `y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.
//
-// Returns The concatenated tensor.
-func ParallelConcat(scope *Scope, values []tf.Output, shape tf.Shape) (output tf.Output) {
+// For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.
+func Sign(scope *Scope, x tf.Output) (y tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"shape": shape}
opspec := tf.OpSpec{
- Type: "ParallelConcat",
+ Type: "Sign",
Input: []tf.Input{
- tf.OutputList(values),
+ x,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// UniqueAttr is an optional argument to Unique.
-type UniqueAttr func(optionalAttr)
+// QuantizedAddAttr is an optional argument to QuantizedAdd.
+type QuantizedAddAttr func(optionalAttr)
-// UniqueOutIdx sets the optional out_idx attribute to value.
-// If not specified, defaults to DT_INT32
-func UniqueOutIdx(value tf.DataType) UniqueAttr {
+// QuantizedAddToutput sets the optional Toutput attribute to value.
+// If not specified, defaults to DT_QINT32
+func QuantizedAddToutput(value tf.DataType) QuantizedAddAttr {
return func(m optionalAttr) {
- m["out_idx"] = value
+ m["Toutput"] = value
}
}
-// Finds unique elements in a 1-D tensor.
-//
-// This operation returns a tensor `y` containing all of the unique elements of `x`
-// sorted in the same order that they occur in `x`. This operation also returns a
-// tensor `idx` the same size as `x` that contains the index of each value of `x`
-// in the unique output `y`. In other words:
+// Returns x + y element-wise, working on quantized buffers.
//
-// `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
+// Arguments:
//
-// For example:
//
-// ```
-// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
-// y, idx = unique(x)
-// y ==> [1, 2, 4, 7, 8]
-// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
-// ```
+// min_x: The float value that the lowest quantized `x` value represents.
+// max_x: The float value that the highest quantized `x` value represents.
+// min_y: The float value that the lowest quantized `y` value represents.
+// max_y: The float value that the highest quantized `y` value represents.
//
-// Arguments:
-// x: 1-D.
+// Returns The float value that the lowest quantized output value represents,
+// and the float value that the highest quantized output value represents.
//
-// Returns 1-D `y` (the unique values) and 1-D `idx` (the indices).
-func Unique(scope *Scope, x tf.Output, optional ...UniqueAttr) (y tf.Output, idx tf.Output) {
+// *NOTE*: `QuantizedAdd` supports limited forms of broadcasting. More about
+// broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+func QuantizedAdd(scope *Scope, x tf.Output, y tf.Output, min_x tf.Output, max_x tf.Output, min_y tf.Output, max_y tf.Output, optional ...QuantizedAddAttr) (z tf.Output, min_z tf.Output, max_z tf.Output) {
if scope.Err() != nil {
return
}
@@ -2327,61 +1949,37 @@ func Unique(scope *Scope, x tf.Output, optional ...UniqueAttr) (y tf.Output, idx
a(attrs)
}
opspec := tf.OpSpec{
- Type: "Unique",
+ Type: "QuantizedAdd",
Input: []tf.Input{
- x,
+ x, y, min_x, max_x, min_y, max_y,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1)
+ return op.Output(0), op.Output(1), op.Output(2)
}
-// DecodeWavAttr is an optional argument to DecodeWav.
-type DecodeWavAttr func(optionalAttr)
-
-// DecodeWavDesiredChannels sets the optional desired_channels attribute to value.
-//
-// value: Number of sample channels wanted.
-// If not specified, defaults to -1
-func DecodeWavDesiredChannels(value int64) DecodeWavAttr {
- return func(m optionalAttr) {
- m["desired_channels"] = value
- }
-}
+// ArgMinAttr is an optional argument to ArgMin.
+type ArgMinAttr func(optionalAttr)
-// DecodeWavDesiredSamples sets the optional desired_samples attribute to value.
-//
-// value: Length of audio requested.
-// If not specified, defaults to -1
-func DecodeWavDesiredSamples(value int64) DecodeWavAttr {
+// ArgMinOutputType sets the optional output_type attribute to value.
+// If not specified, defaults to DT_INT64
+func ArgMinOutputType(value tf.DataType) ArgMinAttr {
return func(m optionalAttr) {
- m["desired_samples"] = value
+ m["output_type"] = value
}
}
-// Decode a 16-bit PCM WAV file to a float tensor.
-//
-// The -32768 to 32767 signed 16-bit values will be scaled to -1.0 to 1.0 in float.
-//
-// When desired_channels is set, if the input contains fewer channels than this
-// then the last channel will be duplicated to give the requested number, else if
-// the input has more channels than requested then the additional channels will be
-// ignored.
-//
-// If desired_samples is set, then the audio will be cropped or padded with zeroes
-// to the requested length.
+// Returns the index with the smallest value across dimensions of a tensor.
//
-// The first output contains a Tensor with the content of the audio samples. The
-// lowest dimension will be the number of channels, and the second will be the
-// number of samples. For example, a ten-sample-long stereo WAV file should give an
-// output shape of [10, 2].
+// Note that in case of ties the identity of the return value is not guaranteed.
//
// Arguments:
-// contents: The WAV-encoded audio, usually from a file.
//
-// Returns 2-D with shape `[length, channels]`, and a scalar holding the
-// sample rate found in the WAV header.
-func DecodeWav(scope *Scope, contents tf.Output, optional ...DecodeWavAttr) (audio tf.Output, sample_rate tf.Output) {
+// dimension: int32 or int64, must be in the range `[-rank(input), rank(input))`.
+// Describes which dimension of the input Tensor to reduce across. For vectors,
+// use dimension = 0.
+func ArgMin(scope *Scope, input tf.Output, dimension tf.Output, optional ...ArgMinAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
@@ -2390,462 +1988,387 @@ func DecodeWav(scope *Scope, contents tf.Output, optional ...DecodeWavAttr) (aud
a(attrs)
}
opspec := tf.OpSpec{
- Type: "DecodeWav",
+ Type: "ArgMin",
Input: []tf.Input{
- contents,
+ input, dimension,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1)
+ return op.Output(0)
}
-// Elementwise computes the bitwise right-shift of `x` and `y`.
+// Convert the quantized 'input' tensor into a lower-precision 'output', using the
//
-// Performs a logical shift for unsigned integer types, and an arithmetic shift
-// for signed integer types.
+// output range specified with 'requested_output_min' and 'requested_output_max'.
//
-// If `y` is negative, or greater than or equal to the width of `x` in bits,
-// the result is implementation defined.
-func RightShift(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
+// [input_min, input_max] are scalar floats that specify the range for the float
+// interpretation of the 'input' data. For example, if input_min is -1.0f and
+// input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0
+// value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.
+//
+// Arguments:
+//
+// input_min: The float value that the minimum quantized input value represents.
+// input_max: The float value that the maximum quantized input value represents.
+// requested_output_min: The float value that the minimum quantized output value represents.
+// requested_output_max: The float value that the maximum quantized output value represents.
+// out_type: The type of the output. Should be a lower bit depth than Tinput.
+//
+// Returns The requested_output_min value is copied into `output_min`, and the
+// requested_output_max value is copied into `output_max`.
+func Requantize(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, requested_output_min tf.Output, requested_output_max tf.Output, out_type tf.DataType) (output tf.Output, output_min tf.Output, output_max tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"out_type": out_type}
opspec := tf.OpSpec{
- Type: "RightShift",
+ Type: "Requantize",
Input: []tf.Input{
- x, y,
+ input, input_min, input_max, requested_output_min, requested_output_max,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1), op.Output(2)
}
-// Elementwise computes the bitwise left-shift of `x` and `y`.
+// Computes the determinant of one or more square matrices.
//
-// If `y` is negative, or greater than or equal to the width of `x` in bits,
-// the result is implementation defined.
-func LeftShift(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
+// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
+// form square matrices. The output is a tensor containing the determinants
+// for all input submatrices `[..., :, :]`.
+//
+// Arguments:
+// input: Shape is `[..., M, M]`.
+//
+// Returns Shape is `[...]`.
+func MatrixDeterminant(scope *Scope, input tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "LeftShift",
+ Type: "MatrixDeterminant",
Input: []tf.Input{
- x, y,
+ input,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Elementwise computes the bitwise AND of `x` and `y`.
-//
-// The result will have those bits set, that are set in both `x` and `y`. The
-// computation is performed on the underlying representations of `x` and `y`.
-func BitwiseAnd(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
+// Computes sin of x element-wise.
+func Sin(scope *Scope, x tf.Output) (y tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "BitwiseAnd",
+ Type: "Sin",
Input: []tf.Input{
- x, y,
+ x,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// FixedUnigramCandidateSamplerAttr is an optional argument to FixedUnigramCandidateSampler.
-type FixedUnigramCandidateSamplerAttr func(optionalAttr)
+// Computes the complementary error function of `x` element-wise.
+func Erfc(scope *Scope, x tf.Output) (y tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "Erfc",
+ Input: []tf.Input{
+ x,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
-// FixedUnigramCandidateSamplerVocabFile sets the optional vocab_file attribute to value.
+// Computes Psi, the derivative of Lgamma (the log of the absolute value of
//
-// value: Each valid line in this file (which should have a CSV-like format)
-// corresponds to a valid word ID. IDs are in sequential order, starting from
-// num_reserved_ids. The last entry in each line is expected to be a value
-// corresponding to the count or relative probability. Exactly one of vocab_file
-// and unigrams needs to be passed to this op.
-// If not specified, defaults to ""
-func FixedUnigramCandidateSamplerVocabFile(value string) FixedUnigramCandidateSamplerAttr {
- return func(m optionalAttr) {
- m["vocab_file"] = value
+// `Gamma(x)`), element-wise.
+func Digamma(scope *Scope, x tf.Output) (y tf.Output) {
+ if scope.Err() != nil {
+ return
}
+ opspec := tf.OpSpec{
+ Type: "Digamma",
+ Input: []tf.Input{
+ x,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// FixedUnigramCandidateSamplerDistortion sets the optional distortion attribute to value.
-//
-// value: The distortion is used to skew the unigram probability distribution.
-// Each weight is first raised to the distortion's power before adding to the
-// internal unigram distribution. As a result, distortion = 1.0 gives regular
-// unigram sampling (as defined by the vocab file), and distortion = 0.0 gives
-// a uniform distribution.
-// If not specified, defaults to 1
-func FixedUnigramCandidateSamplerDistortion(value float32) FixedUnigramCandidateSamplerAttr {
+// Conv2DBackpropFilterAttr is an optional argument to Conv2DBackpropFilter.
+type Conv2DBackpropFilterAttr func(optionalAttr)
+
+// Conv2DBackpropFilterUseCudnnOnGpu sets the optional use_cudnn_on_gpu attribute to value.
+// If not specified, defaults to true
+func Conv2DBackpropFilterUseCudnnOnGpu(value bool) Conv2DBackpropFilterAttr {
return func(m optionalAttr) {
- m["distortion"] = value
+ m["use_cudnn_on_gpu"] = value
}
}
-// FixedUnigramCandidateSamplerNumReservedIds sets the optional num_reserved_ids attribute to value.
+// Conv2DBackpropFilterDataFormat sets the optional data_format attribute to value.
//
-// value: Optionally some reserved IDs can be added in the range [0,
-// ..., num_reserved_ids) by the users. One use case is that a special unknown
-// word token is used as ID 0. These IDs will have a sampling probability of 0.
-// If not specified, defaults to 0
-func FixedUnigramCandidateSamplerNumReservedIds(value int64) FixedUnigramCandidateSamplerAttr {
+// value: Specify the data format of the input and output data. With the
+// default format "NHWC", the data is stored in the order of:
+// [batch, in_height, in_width, in_channels].
+// Alternatively, the format could be "NCHW", the data storage order of:
+// [batch, in_channels, in_height, in_width].
+// If not specified, defaults to "NHWC"
+func Conv2DBackpropFilterDataFormat(value string) Conv2DBackpropFilterAttr {
return func(m optionalAttr) {
- m["num_reserved_ids"] = value
+ m["data_format"] = value
}
}
-// FixedUnigramCandidateSamplerNumShards sets the optional num_shards attribute to value.
-//
-// value: A sampler can be used to sample from a subset of the original range
-// in order to speed up the whole computation through parallelism. This parameter
-// (together with 'shard') indicates the number of partitions that are being
-// used in the overall computation.
-// If not specified, defaults to 1
+// Conv2DBackpropFilterDilations sets the optional dilations attribute to value.
//
-// REQUIRES: value >= 1
-func FixedUnigramCandidateSamplerNumShards(value int64) FixedUnigramCandidateSamplerAttr {
+// value: 1-D tensor of length 4. The dilation factor for each dimension of
+// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
+// element on that dimension. The dimension order is determined by the value of
+// `data_format`, see above for details. Dilations in the batch and depth
+// dimensions must be 1.
+// If not specified, defaults to [1, 1, 1, 1]
+func Conv2DBackpropFilterDilations(value []int64) Conv2DBackpropFilterAttr {
return func(m optionalAttr) {
- m["num_shards"] = value
+ m["dilations"] = value
}
}
-// FixedUnigramCandidateSamplerShard sets the optional shard attribute to value.
+// Computes the gradients of convolution with respect to the filter.
//
-// value: A sampler can be used to sample from a subset of the original range
-// in order to speed up the whole computation through parallelism. This parameter
-// (together with 'num_shards') indicates the particular partition number of a
-// sampler op, when partitioning is being used.
-// If not specified, defaults to 0
+// Arguments:
+// input: 4-D with shape `[batch, in_height, in_width, in_channels]`.
+// filter_sizes: An integer vector representing the tensor shape of `filter`,
+// where `filter` is a 4-D
+// `[filter_height, filter_width, in_channels, out_channels]` tensor.
+// out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`.
+// Gradients w.r.t. the output of the convolution.
+// strides: The stride of the sliding window for each dimension of the input
+// of the convolution. Must be in the same order as the dimension specified with
+// format.
+// padding: The type of padding algorithm to use.
//
-// REQUIRES: value >= 0
-func FixedUnigramCandidateSamplerShard(value int64) FixedUnigramCandidateSamplerAttr {
- return func(m optionalAttr) {
- m["shard"] = value
+// Returns 4-D with shape
+// `[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t.
+// the `filter` input of the convolution.
+func Conv2DBackpropFilter(scope *Scope, input tf.Output, filter_sizes tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv2DBackpropFilterAttr) (output tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{"strides": strides, "padding": padding}
+ for _, a := range optional {
+ a(attrs)
}
+ opspec := tf.OpSpec{
+ Type: "Conv2DBackpropFilter",
+ Input: []tf.Input{
+ input, filter_sizes, out_backprop,
+ },
+ Attrs: attrs,
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// FixedUnigramCandidateSamplerUnigrams sets the optional unigrams attribute to value.
+// Returns the number of work units this Reader has finished processing.
//
-// value: A list of unigram counts or probabilities, one per ID in sequential
-// order. Exactly one of vocab_file and unigrams should be passed to this op.
-// If not specified, defaults to <>
-func FixedUnigramCandidateSamplerUnigrams(value []float32) FixedUnigramCandidateSamplerAttr {
- return func(m optionalAttr) {
- m["unigrams"] = value
+// Arguments:
+// reader_handle: Handle to a Reader.
+func ReaderNumWorkUnitsCompletedV2(scope *Scope, reader_handle tf.Output) (units_completed tf.Output) {
+ if scope.Err() != nil {
+ return
}
+ opspec := tf.OpSpec{
+ Type: "ReaderNumWorkUnitsCompletedV2",
+ Input: []tf.Input{
+ reader_handle,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// FixedUnigramCandidateSamplerSeed sets the optional seed attribute to value.
+// Returns x / y element-wise for real types.
//
-// value: If either seed or seed2 are set to be non-zero, the random number
-// generator is seeded by the given seed. Otherwise, it is seeded by a
-// random seed.
-// If not specified, defaults to 0
-func FixedUnigramCandidateSamplerSeed(value int64) FixedUnigramCandidateSamplerAttr {
- return func(m optionalAttr) {
- m["seed"] = value
+// If `x` and `y` are reals, this will return the floating-point division.
+//
+// *NOTE*: `RealDiv` supports broadcasting. More about broadcasting
+// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+func RealDiv(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "RealDiv",
+ Input: []tf.Input{
+ x, y,
+ },
}
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// FixedUnigramCandidateSamplerSeed2 sets the optional seed2 attribute to value.
-//
-// value: A second seed to avoid seed collision.
-// If not specified, defaults to 0
-func FixedUnigramCandidateSamplerSeed2(value int64) FixedUnigramCandidateSamplerAttr {
- return func(m optionalAttr) {
- m["seed2"] = value
+// Computes the log of the absolute value of `Gamma(x)` element-wise.
+func Lgamma(scope *Scope, x tf.Output) (y tf.Output) {
+ if scope.Err() != nil {
+ return
}
+ opspec := tf.OpSpec{
+ Type: "Lgamma",
+ Input: []tf.Input{
+ x,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// Generates labels for candidate sampling with a learned unigram distribution.
-//
-// A unigram sampler could use a fixed unigram distribution read from a
-// file or passed in as an in-memory array instead of building up the distribution
-// from data on the fly. There is also an option to skew the distribution by
-// applying a distortion power to the weights.
-//
-// The vocabulary file should be in CSV-like format, with the last field
-// being the weight associated with the word.
-//
-// For each batch, this op picks a single set of sampled candidate labels.
+// Computes the reverse mode backpropagated gradient of the Cholesky algorithm.
//
-// The advantages of sampling candidates per-batch are simplicity and the
-// possibility of efficient dense matrix multiplication. The disadvantage is that
-// the sampled candidates must be chosen independently of the context and of the
-// true labels.
+// For an explanation see "Differentiation of the Cholesky algorithm" by
+// Iain Murray http://arxiv.org/abs/1602.07527.
//
// Arguments:
-// true_classes: A batch_size * num_true matrix, in which each row contains the
-// IDs of the num_true target_classes in the corresponding original label.
-// num_true: Number of true labels per context.
-// num_sampled: Number of candidates to randomly sample.
-// unique: If unique is true, we sample with rejection, so that all sampled
-// candidates in a batch are unique. This requires some approximation to
-// estimate the post-rejection sampling probabilities.
-// range_max: The sampler will sample integers from the interval [0, range_max).
+// l: Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`.
+// Algorithm depends only on lower triangular part of the innermost matrices of
+// this tensor.
+// grad: df/dl where f is some scalar function. Shape is `[..., M, M]`.
+// Algorithm depends only on lower triangular part of the innermost matrices of
+// this tensor.
//
-// Returns A vector of length num_sampled, in which each element is the ID of a
-// sampled candidate; a batch_size * num_true matrix, representing the number
-// of times each candidate is expected to occur in a batch of sampled
-// candidates (if unique=true, then this is a probability); and a vector of
-// length num_sampled, for each sampled candidate representing the number of
-// times the candidate is expected to occur in a batch of sampled candidates
-// (if unique=true, then this is a probability).
-func FixedUnigramCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...FixedUnigramCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
+// Returns Symmetrized version of df/dA. Shape is `[..., M, M]`.
+func CholeskyGrad(scope *Scope, l tf.Output, grad tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique, "range_max": range_max}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "FixedUnigramCandidateSampler",
+ Type: "CholeskyGrad",
Input: []tf.Input{
- true_classes,
+ l, grad,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
+ return op.Output(0)
}
-// UniformCandidateSamplerAttr is an optional argument to UniformCandidateSampler.
-type UniformCandidateSamplerAttr func(optionalAttr)
-
-// UniformCandidateSamplerSeed sets the optional seed attribute to value.
-//
-// value: If either seed or seed2 are set to be non-zero, the random number
-// generator is seeded by the given seed. Otherwise, it is seeded by a
-// random seed.
-// If not specified, defaults to 0
-func UniformCandidateSamplerSeed(value int64) UniformCandidateSamplerAttr {
- return func(m optionalAttr) {
- m["seed"] = value
+// Computes inverse hyperbolic cosine of x element-wise.
+func Acosh(scope *Scope, x tf.Output) (y tf.Output) {
+ if scope.Err() != nil {
+ return
}
+ opspec := tf.OpSpec{
+ Type: "Acosh",
+ Input: []tf.Input{
+ x,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// UniformCandidateSamplerSeed2 sets the optional seed2 attribute to value.
+// SerializeManySparseAttr is an optional argument to SerializeManySparse.
+type SerializeManySparseAttr func(optionalAttr)
+
+// SerializeManySparseOutType sets the optional out_type attribute to value.
//
-// value: A second seed to avoid seed collision.
-// If not specified, defaults to 0
-func UniformCandidateSamplerSeed2(value int64) UniformCandidateSamplerAttr {
+// value: The `dtype` to use for serialization; the supported types are `string`
+// (default) and `variant`.
+// If not specified, defaults to DT_STRING
+func SerializeManySparseOutType(value tf.DataType) SerializeManySparseAttr {
return func(m optionalAttr) {
- m["seed2"] = value
+ m["out_type"] = value
}
}
-// Generates labels for candidate sampling with a uniform distribution.
-//
-// See explanations of candidate sampling and the data formats at
-// go/candidate-sampling.
+// Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor` object.
//
-// For each batch, this op picks a single set of sampled candidate labels.
+// The `SparseTensor` must have rank `R` greater than 1, and the first dimension
+// is treated as the minibatch dimension. Elements of the `SparseTensor`
+// must be sorted in increasing order of this first dimension. The serialized
+// `SparseTensor` objects going into each row of `serialized_sparse` will have
+// rank `R-1`.
//
-// The advantages of sampling candidates per-batch are simplicity and the
-// possibility of efficient dense matrix multiplication. The disadvantage is that
-// the sampled candidates must be chosen independently of the context and of the
-// true labels.
+// The minibatch size `N` is extracted from `sparse_shape[0]`.
//
// Arguments:
-// true_classes: A batch_size * num_true matrix, in which each row contains the
-// IDs of the num_true target_classes in the corresponding original label.
-// num_true: Number of true labels per context.
-// num_sampled: Number of candidates to randomly sample.
-// unique: If unique is true, we sample with rejection, so that all sampled
-// candidates in a batch are unique. This requires some approximation to
-// estimate the post-rejection sampling probabilities.
-// range_max: The sampler will sample integers from the interval [0, range_max).
-//
-// Returns A vector of length num_sampled, in which each element is the ID of a
-// sampled candidate; a batch_size * num_true matrix, representing the number
-// of times each candidate is expected to occur in a batch of sampled
-// candidates (if unique=true, then this is a probability); and a vector of
-// length num_sampled, for each sampled candidate representing the number of
-// times the candidate is expected to occur in a batch of sampled candidates
-// (if unique=true, then this is a probability).
-func UniformCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...UniformCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
+// sparse_indices: 2-D. The `indices` of the minibatch `SparseTensor`.
+// sparse_values: 1-D. The `values` of the minibatch `SparseTensor`.
+// sparse_shape: 1-D. The `shape` of the minibatch `SparseTensor`.
+func SerializeManySparse(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output, optional ...SerializeManySparseAttr) (serialized_sparse tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique, "range_max": range_max}
+ attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "UniformCandidateSampler",
+ Type: "SerializeManySparse",
Input: []tf.Input{
- true_classes,
+ sparse_indices, sparse_values, sparse_shape,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
+ return op.Output(0)
}
-// AbortAttr is an optional argument to Abort.
-type AbortAttr func(optionalAttr)
+// TensorArrayV2Attr is an optional argument to TensorArrayV2.
+type TensorArrayV2Attr func(optionalAttr)
-// AbortErrorMsg sets the optional error_msg attribute to value.
-//
-// value: A string which is the message associated with the exception.
-// If not specified, defaults to ""
-func AbortErrorMsg(value string) AbortAttr {
+// TensorArrayV2ElementShape sets the optional element_shape attribute to value.
+// If not specified, defaults to <unknown_rank:true >
+func TensorArrayV2ElementShape(value tf.Shape) TensorArrayV2Attr {
return func(m optionalAttr) {
- m["error_msg"] = value
+ m["element_shape"] = value
}
}
-// AbortExitWithoutError sets the optional exit_without_error attribute to value.
+// TensorArrayV2DynamicSize sets the optional dynamic_size attribute to value.
// If not specified, defaults to false
-func AbortExitWithoutError(value bool) AbortAttr {
+func TensorArrayV2DynamicSize(value bool) TensorArrayV2Attr {
return func(m optionalAttr) {
- m["exit_without_error"] = value
+ m["dynamic_size"] = value
}
}
-// Raise an exception to abort the process when called.
-//
-// If exit_without_error is true, the process will exit normally;
-// otherwise it will exit with a SIGABRT signal.
-//
-// Returns nothing but an exception.
-//
-// Returns the created operation.
-func Abort(scope *Scope, optional ...AbortAttr) (o *tf.Operation) {
- if scope.Err() != nil {
- return
- }
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
- opspec := tf.OpSpec{
- Type: "Abort",
-
- Attrs: attrs,
+// TensorArrayV2ClearAfterRead sets the optional clear_after_read attribute to value.
+// If not specified, defaults to true
+func TensorArrayV2ClearAfterRead(value bool) TensorArrayV2Attr {
+ return func(m optionalAttr) {
+ m["clear_after_read"] = value
}
- return scope.AddOperation(opspec)
}
-// SpaceToDepthAttr is an optional argument to SpaceToDepth.
-type SpaceToDepthAttr func(optionalAttr)
-
-// SpaceToDepthDataFormat sets the optional data_format attribute to value.
-// If not specified, defaults to "NHWC"
-func SpaceToDepthDataFormat(value string) SpaceToDepthAttr {
+// TensorArrayV2TensorArrayName sets the optional tensor_array_name attribute to value.
+// If not specified, defaults to ""
+func TensorArrayV2TensorArrayName(value string) TensorArrayV2Attr {
return func(m optionalAttr) {
- m["data_format"] = value
+ m["tensor_array_name"] = value
}
}
-// SpaceToDepth for tensors of type T.
-//
-// Rearranges blocks of spatial data, into depth. More specifically,
-// this op outputs a copy of the input tensor where values from the `height`
-// and `width` dimensions are moved to the `depth` dimension.
-// The attr `block_size` indicates the input block size.
-//
-// * Non-overlapping blocks of size `block_size x block_size` are rearranged
-// into depth at each location.
-// * The depth of the output tensor is `block_size * block_size * input_depth`.
-// * The Y, X coordinates within each block of the input become the high order
-// component of the output channel index.
-// * The input tensor's height and width must be divisible by block_size.
-//
-// The `data_format` attr specifies the layout of the input and output tensors
-// with the following options:
-// "NHWC": `[ batch, height, width, channels ]`
-// "NCHW": `[ batch, channels, height, width ]`
-// "NCHW_VECT_C":
-// `qint8 [ batch, channels / 4, height, width, 4 ]`
-//
-// It is useful to consider the operation as transforming a 6-D Tensor.
-// e.g. for data_format = NHWC,
-// Each element in the input tensor can be specified via 6 coordinates,
-// ordered by decreasing memory layout significance as:
-// n,oY,bY,oX,bX,iC (where n=batch index, oX, oY means X or Y coordinates
-// within the output image, bX, bY means coordinates
-// within the input block, iC means input channels).
-// The output would be a transpose to the following layout:
-// n,oY,oX,bY,bX,iC
-//
-// This operation is useful for resizing the activations between convolutions
-// (but keeping all data), e.g. instead of pooling. It is also useful for training
-// purely convolutional models.
-//
-// For example, given an input of shape `[1, 2, 2, 1]`, data_format = "NHWC" and
-// block_size = 2:
-//
-// ```
-// x = [[[[1], [2]],
-// [[3], [4]]]]
-// ```
-//
-// This operation will output a tensor of shape `[1, 1, 1, 4]`:
-//
-// ```
-// [[[[1, 2, 3, 4]]]]
-// ```
-//
-// Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`,
-// the corresponding output will have a single element (i.e. width and height are
-// both 1) and will have a depth of 4 channels (1 * block_size * block_size).
-// The output element shape is `[1, 1, 4]`.
-//
-// For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g.
-//
-// ```
-// x = [[[[1, 2, 3], [4, 5, 6]],
-// [[7, 8, 9], [10, 11, 12]]]]
-// ```
-//
-// This operation, for block_size of 2, will return the following tensor of shape
-// `[1, 1, 1, 12]`
-//
-// ```
-// [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
-// ```
-//
-// Similarly, for the following input of shape `[1 4 4 1]`, and a block size of 2:
-//
-// ```
-// x = [[[[1], [2], [5], [6]],
-// [[3], [4], [7], [8]],
-// [[9], [10], [13], [14]],
-// [[11], [12], [15], [16]]]]
-// ```
-//
-// the operator will return the following tensor of shape `[1 2 2 4]`:
-//
-// ```
-// x = [[[[1, 2, 3, 4],
-// [5, 6, 7, 8]],
-// [[9, 10, 11, 12],
-// [13, 14, 15, 16]]]]
-// ```
-//
-// Arguments:
-//
-// block_size: The size of the spatial block.
-func SpaceToDepth(scope *Scope, input tf.Output, block_size int64, optional ...SpaceToDepthAttr) (output tf.Output) {
+// Deprecated. Use TensorArrayV3
+func TensorArrayV2(scope *Scope, size tf.Output, dtype tf.DataType, optional ...TensorArrayV2Attr) (handle tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"block_size": block_size}
+ attrs := map[string]interface{}{"dtype": dtype}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "SpaceToDepth",
+ Type: "TensorArrayV2",
Input: []tf.Input{
- input,
+ size,
},
Attrs: attrs,
}
@@ -2853,239 +2376,135 @@ func SpaceToDepth(scope *Scope, input tf.Output, block_size int64, optional ...S
return op.Output(0)
}
-// Scatter `updates` into a new (initially zero) tensor according to `indices`.
-//
-// Creates a new tensor by applying sparse `updates` to individual
-// values or slices within a zero tensor of the given `shape` according to
-// indices. This operator is the inverse of the @{tf.gather_nd} operator which
-// extracts values or slices from a given tensor.
-//
-// **WARNING**: The order in which updates are applied is nondeterministic, so the
-// output will be nondeterministic if `indices` contains duplicates.
-//
-// `indices` is an integer tensor containing indices into a new tensor of shape
-// `shape`. The last dimension of `indices` can be at most the rank of `shape`:
-//
-// indices.shape[-1] <= shape.rank
-//
-// The last dimension of `indices` corresponds to indices into elements
-// (if `indices.shape[-1] = shape.rank`) or slices
-// (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of
-// `shape`. `updates` is a tensor with shape
-//
-// indices.shape[:-1] + shape[indices.shape[-1]:]
-//
-// The simplest form of scatter is to insert individual elements in a tensor by
-// index. For example, say we want to insert 4 scattered elements in a rank-1
-// tensor with 8 elements.
-//
-// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
-// <img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd1.png" alt>
-// </div>
-//
-// In Python, this scatter operation would look like this:
-//
-// ```python
-// indices = tf.constant([[4], [3], [1], [7]])
-// updates = tf.constant([9, 10, 11, 12])
-// shape = tf.constant([8])
-// scatter = tf.scatter_nd(indices, updates, shape)
-// with tf.Session() as sess:
-// print(sess.run(scatter))
-// ```
-//
-// The resulting tensor would look like this:
-//
-// [0, 11, 0, 10, 9, 0, 0, 12]
-//
-// We can also, insert entire slices of a higher rank tensor all at once. For
-// example, if we wanted to insert two slices in the first dimension of a
-// rank-3 tensor with two matrices of new values.
-//
-// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
-// <img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd2.png" alt>
-// </div>
-//
-// In Python, this scatter operation would look like this:
-//
-// ```python
-// indices = tf.constant([[0], [2]])
-// updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
-// [7, 7, 7, 7], [8, 8, 8, 8]],
-// [[5, 5, 5, 5], [6, 6, 6, 6],
-// [7, 7, 7, 7], [8, 8, 8, 8]]])
-// shape = tf.constant([4, 4, 4])
-// scatter = tf.scatter_nd(indices, updates, shape)
-// with tf.Session() as sess:
-// print(sess.run(scatter))
-// ```
+// Computes the mean along sparse segments of a tensor.
//
-// The resulting tensor would look like this:
+// Like `SparseSegmentMean`, but allows missing ids in `segment_ids`. If an id is
+// missing, the `output` tensor at that position will be zeroed.
//
-// [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
-// [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
-// [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
-// [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]
+// Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
+// segments.
//
// Arguments:
-// indices: Index tensor.
-// updates: Updates to scatter into output.
-// shape: 1-D. The shape of the resulting tensor.
//
-// Returns A new tensor with the given shape and updates applied according
-// to the indices.
-func ScatterNd(scope *Scope, indices tf.Output, updates tf.Output, shape tf.Output) (output tf.Output) {
+// indices: A 1-D tensor. Has same rank as `segment_ids`.
+// segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
+// num_segments: Should equal the number of distinct segment IDs.
+//
+// Returns Has same shape as data, except for dimension 0 which has size
+// `num_segments`.
+func SparseSegmentMeanWithNumSegments(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "ScatterNd",
+ Type: "SparseSegmentMeanWithNumSegments",
Input: []tf.Input{
- indices, updates, shape,
+ data, indices, segment_ids, num_segments,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Exits the current frame to its parent frame.
-//
-// Exit makes its input `data` available to the parent frame.
-//
-// Arguments:
-// data: The tensor to be made available to the parent frame.
-//
-// Returns The same tensor as `data`.
-func Exit(scope *Scope, data tf.Output) (output tf.Output) {
+// Computes hyperbolic cosine of x element-wise.
+func Cosh(scope *Scope, x tf.Output) (y tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Exit",
+ Type: "Cosh",
Input: []tf.Input{
- data,
+ x,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// EnterAttr is an optional argument to Enter.
-type EnterAttr func(optionalAttr)
-
-// EnterIsConstant sets the optional is_constant attribute to value.
-//
-// value: If true, the output is constant within the child frame.
-// If not specified, defaults to false
-func EnterIsConstant(value bool) EnterAttr {
- return func(m optionalAttr) {
- m["is_constant"] = value
+// Creates a dataset that emits each dim-0 slice of `components` once.
+func TensorSliceDataset(scope *Scope, components []tf.Output, output_shapes []tf.Shape) (handle tf.Output) {
+ if scope.Err() != nil {
+ return
}
-}
-
-// EnterParallelIterations sets the optional parallel_iterations attribute to value.
-//
-// value: The number of iterations allowed to run in parallel.
-// If not specified, defaults to 10
-func EnterParallelIterations(value int64) EnterAttr {
- return func(m optionalAttr) {
- m["parallel_iterations"] = value
+ attrs := map[string]interface{}{"output_shapes": output_shapes}
+ opspec := tf.OpSpec{
+ Type: "TensorSliceDataset",
+ Input: []tf.Input{
+ tf.OutputList(components),
+ },
+ Attrs: attrs,
}
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// Creates or finds a child frame, and makes `data` available to the child frame.
-//
-// This op is used together with `Exit` to create loops in the graph.
-// The unique `frame_name` is used by the `Executor` to identify frames. If
-// `is_constant` is true, `output` is a constant in the child frame; otherwise
-// it may be changed in the child frame. At most `parallel_iterations` iterations
-// are run in parallel in the child frame.
-//
-// Arguments:
-// data: The tensor to be made available to the child frame.
-// frame_name: The name of the child frame.
+// Computes natural logarithm of (1 + x) element-wise.
//
-// Returns The same tensor as `data`.
-func Enter(scope *Scope, data tf.Output, frame_name string, optional ...EnterAttr) (output tf.Output) {
+// I.e., \\(y = \log_e (1 + x)\\).
+func Log1p(scope *Scope, x tf.Output) (y tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"frame_name": frame_name}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "Enter",
+ Type: "Log1p",
Input: []tf.Input{
- data,
+ x,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Forwards `data` to the output port determined by `pred`.
-//
-// If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise,
-// the data goes to `output_false`.
-//
-// See also `RefSwitch` and `Merge`.
+// Computes rectified linear 6 gradients for a Relu6 operation.
//
// Arguments:
-// data: The tensor to be forwarded to the appropriate output.
-// pred: A scalar that specifies which output port will receive data.
+// gradients: The backpropagated gradients to the corresponding Relu6 operation.
+// features: The features passed as input to the corresponding Relu6 operation, or
+// its output; using either one produces the same result.
//
-// Returns `output_false` (receives `data` when `pred` is false) and
-// `output_true` (receives `data` when `pred` is true).
-func Switch(scope *Scope, data tf.Output, pred tf.Output) (output_false tf.Output, output_true tf.Output) {
+// Returns The gradients:
+// `gradients * (features > 0) * (features < 6)`.
+func Relu6Grad(scope *Scope, gradients tf.Output, features tf.Output) (backprops tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Switch",
+ Type: "Relu6Grad",
Input: []tf.Input{
- data, pred,
+ gradients, features,
},
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1)
+ return op.Output(0)
}
-// CTCGreedyDecoderAttr is an optional argument to CTCGreedyDecoder.
-type CTCGreedyDecoderAttr func(optionalAttr)
+// ResizeBicubicAttr is an optional argument to ResizeBicubic.
+type ResizeBicubicAttr func(optionalAttr)
-// CTCGreedyDecoderMergeRepeated sets the optional merge_repeated attribute to value.
+// ResizeBicubicAlignCorners sets the optional align_corners attribute to value.
//
-// value: If True, merge repeated classes in output.
+// value: If true, rescale input by (new_height - 1) / (height - 1), which
+// exactly aligns the 4 corners of images and resized images. If false, rescale
+// by new_height / height. Treat similarly the width dimension.
// If not specified, defaults to false
-func CTCGreedyDecoderMergeRepeated(value bool) CTCGreedyDecoderAttr {
+func ResizeBicubicAlignCorners(value bool) ResizeBicubicAttr {
return func(m optionalAttr) {
- m["merge_repeated"] = value
+ m["align_corners"] = value
}
}
-// Performs greedy decoding on the logits given in inputs.
-//
-// A note about the attribute merge_repeated: if enabled, when
-// consecutive logits' maximum indices are the same, only the first of
-// these is emitted. Labeling the blank '*', the sequence "A B B * B B"
-// becomes "A B B" if merge_repeated = True and "A B B B B" if
-// merge_repeated = False.
+// Resize `images` to `size` using bicubic interpolation.
//
-// Regardless of the value of merge_repeated, if the maximum index of a given
-// time and batch corresponds to the blank, index `(num_classes - 1)`, no new
-// element is emitted.
+// Input images can be of different types but output images are always float.
//
// Arguments:
-// inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
-// sequence_length: A vector containing sequence lengths, size `(batch_size)`.
+// images: 4-D with shape `[batch, height, width, channels]`.
+// size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The
+// new size for the images.
//
-// Returns Indices matrix, size `(total_decoded_outputs x 2)`, of a
-// `SparseTensor<int64, 2>` (the rows store: [batch, time]); values vector,
-// size `(total_decoded_outputs)`, of a `SparseTensor<int64, 2>` (the vector
-// stores the decoded classes); shape vector, size `(2)`, of the decoded
-// SparseTensor (values are: `[batch_size, max_decoded_length]`); and a matrix,
-// size `(batch_size x 1)`, containing sequence log-probabilities.
-func CTCGreedyDecoder(scope *Scope, inputs tf.Output, sequence_length tf.Output, optional ...CTCGreedyDecoderAttr) (decoded_indices tf.Output, decoded_values tf.Output, decoded_shape tf.Output, log_probability tf.Output) {
+// Returns 4-D with shape
+// `[batch, new_height, new_width, channels]`.
+func ResizeBicubic(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeBicubicAttr) (resized_images tf.Output) {
if scope.Err() != nil {
return
}
@@ -3094,138 +2513,132 @@ func CTCGreedyDecoder(scope *Scope, inputs tf.Output, sequence_length tf.Output,
a(attrs)
}
opspec := tf.OpSpec{
- Type: "CTCGreedyDecoder",
+ Type: "ResizeBicubic",
Input: []tf.Input{
- inputs, sequence_length,
+ images, size,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
-}
-
-// CTCLossAttr is an optional argument to CTCLoss.
-type CTCLossAttr func(optionalAttr)
-
-// CTCLossPreprocessCollapseRepeated sets the optional preprocess_collapse_repeated attribute to value.
-//
-// value: Scalar, if true then repeated labels are
-// collapsed prior to the CTC calculation.
-// If not specified, defaults to false
-func CTCLossPreprocessCollapseRepeated(value bool) CTCLossAttr {
- return func(m optionalAttr) {
- m["preprocess_collapse_repeated"] = value
- }
+ return op.Output(0)
}
-// CTCLossCtcMergeRepeated sets the optional ctc_merge_repeated attribute to value.
+// Computes natural logarithm of x element-wise.
//
-// value: Scalar. If set to false, *during* CTC calculation
-// repeated non-blank labels will not be merged and are interpreted as
-// individual labels. This is a simplified version of CTC.
-// If not specified, defaults to true
-func CTCLossCtcMergeRepeated(value bool) CTCLossAttr {
- return func(m optionalAttr) {
- m["ctc_merge_repeated"] = value
+// I.e., \\(y = \log_e x\\).
+func Log(scope *Scope, x tf.Output) (y tf.Output) {
+ if scope.Err() != nil {
+ return
}
-}
-
-// CTCLossIgnoreLongerOutputsThanInputs sets the optional ignore_longer_outputs_than_inputs attribute to value.
-//
-// value: Scalar. If set to true, during CTC
-// calculation, items that have longer output sequences than input sequences
-// are skipped: they don't contribute to the loss term and have zero-gradient.
-// If not specified, defaults to false
-func CTCLossIgnoreLongerOutputsThanInputs(value bool) CTCLossAttr {
- return func(m optionalAttr) {
- m["ignore_longer_outputs_than_inputs"] = value
+ opspec := tf.OpSpec{
+ Type: "Log",
+ Input: []tf.Input{
+ x,
+ },
}
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// Calculates the CTC Loss (log probability) for each batch entry. Also calculates
-//
-// the gradient. This class performs the softmax operation for you, so inputs
-// should be e.g. linear projections of outputs by an LSTM.
-//
-// Arguments:
-// inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
-// labels_indices: The indices of a `SparseTensor<int32, 2>`.
-// `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for
-// `(batch b, time t)`.
-// labels_values: The values (labels) associated with the given batch and time.
-// sequence_length: A vector containing sequence lengths (batch).
+// Rounds the values of a tensor to the nearest integer, element-wise.
//
-// Returns A vector (batch) containing log-probabilities, and the gradient of
-// `loss`: 3-D, shape `(max_time x batch_size x num_classes)`.
-func CTCLoss(scope *Scope, inputs tf.Output, labels_indices tf.Output, labels_values tf.Output, sequence_length tf.Output, optional ...CTCLossAttr) (loss tf.Output, gradient tf.Output) {
+// Rounds half to even, also known as banker's rounding. If you want to round
+// according to the current system rounding mode, use std::rint.
+func Round(scope *Scope, x tf.Output) (y tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "CTCLoss",
+ Type: "Round",
Input: []tf.Input{
- inputs, labels_indices, labels_values, sequence_length,
+ x,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1)
+ return op.Output(0)
}
-// OrderedMapSizeAttr is an optional argument to OrderedMapSize.
-type OrderedMapSizeAttr func(optionalAttr)
+// RecordInputAttr is an optional argument to RecordInput.
+type RecordInputAttr func(optionalAttr)
-// OrderedMapSizeCapacity sets the optional capacity attribute to value.
-// If not specified, defaults to 0
+// RecordInputFileRandomSeed sets the optional file_random_seed attribute to value.
//
-// REQUIRES: value >= 0
-func OrderedMapSizeCapacity(value int64) OrderedMapSizeAttr {
+// value: Random seeds used to produce randomized records.
+// If not specified, defaults to 301
+func RecordInputFileRandomSeed(value int64) RecordInputAttr {
return func(m optionalAttr) {
- m["capacity"] = value
+ m["file_random_seed"] = value
}
}
-// OrderedMapSizeMemoryLimit sets the optional memory_limit attribute to value.
+// RecordInputFileShuffleShiftRatio sets the optional file_shuffle_shift_ratio attribute to value.
+//
+// value: Shifts the list of files after the list is randomly
+// shuffled.
// If not specified, defaults to 0
+func RecordInputFileShuffleShiftRatio(value float32) RecordInputAttr {
+ return func(m optionalAttr) {
+ m["file_shuffle_shift_ratio"] = value
+ }
+}
+
+// RecordInputFileBufferSize sets the optional file_buffer_size attribute to value.
//
-// REQUIRES: value >= 0
-func OrderedMapSizeMemoryLimit(value int64) OrderedMapSizeAttr {
+// value: The randomization shuffling buffer.
+// If not specified, defaults to 10000
+func RecordInputFileBufferSize(value int64) RecordInputAttr {
return func(m optionalAttr) {
- m["memory_limit"] = value
+ m["file_buffer_size"] = value
}
}
-// OrderedMapSizeContainer sets the optional container attribute to value.
-// If not specified, defaults to ""
-func OrderedMapSizeContainer(value string) OrderedMapSizeAttr {
+// RecordInputFileParallelism sets the optional file_parallelism attribute to value.
+//
+// value: How many sstables are opened and concurrently iterated over.
+// If not specified, defaults to 16
+func RecordInputFileParallelism(value int64) RecordInputAttr {
return func(m optionalAttr) {
- m["container"] = value
+ m["file_parallelism"] = value
}
}
-// OrderedMapSizeSharedName sets the optional shared_name attribute to value.
+// RecordInputBatchSize sets the optional batch_size attribute to value.
+//
+// value: The batch size.
+// If not specified, defaults to 32
+func RecordInputBatchSize(value int64) RecordInputAttr {
+ return func(m optionalAttr) {
+ m["batch_size"] = value
+ }
+}
+
+// RecordInputCompressionType sets the optional compression_type attribute to value.
+//
+// value: The type of compression for the file. Currently ZLIB and
+// GZIP are supported. Defaults to none.
// If not specified, defaults to ""
-func OrderedMapSizeSharedName(value string) OrderedMapSizeAttr {
+func RecordInputCompressionType(value string) RecordInputAttr {
return func(m optionalAttr) {
- m["shared_name"] = value
+ m["compression_type"] = value
}
}
-// Op returns the number of elements in the underlying container.
-func OrderedMapSize(scope *Scope, dtypes []tf.DataType, optional ...OrderedMapSizeAttr) (size tf.Output) {
+// Emits randomized records.
+//
+// Arguments:
+// file_pattern: Glob pattern for the data files.
+//
+// Returns A tensor of shape [batch_size].
+func RecordInput(scope *Scope, file_pattern string, optional ...RecordInputAttr) (records tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dtypes": dtypes}
+ attrs := map[string]interface{}{"file_pattern": file_pattern}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "OrderedMapSize",
+ Type: "RecordInput",
Attrs: attrs,
}
@@ -3233,175 +2646,225 @@ func OrderedMapSize(scope *Scope, dtypes []tf.DataType, optional ...OrderedMapSi
return op.Output(0)
}
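+
+// A hedged sketch (editorial addition) of the functional-option pattern used
+// throughout this package; the glob "/tmp/train-*" is a hypothetical path.
+//
+// ```
+// s := op.NewScope()
+// recs := op.RecordInput(s, "/tmp/train-*",
+// 	op.RecordInputBatchSize(64),
+// 	op.RecordInputFileParallelism(4))
+// ```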
-// OrderedMapUnstageAttr is an optional argument to OrderedMapUnstage.
-type OrderedMapUnstageAttr func(optionalAttr)
-
-// OrderedMapUnstageCapacity sets the optional capacity attribute to value.
-// If not specified, defaults to 0
+// Computes reciprocal of square root of x element-wise.
//
-// REQUIRES: value >= 0
-func OrderedMapUnstageCapacity(value int64) OrderedMapUnstageAttr {
- return func(m optionalAttr) {
- m["capacity"] = value
+// I.e., \\(y = 1 / \sqrt{x}\\).
+func Rsqrt(scope *Scope, x tf.Output) (y tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "Rsqrt",
+ Input: []tf.Input{
+ x,
+ },
}
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// OrderedMapUnstageMemoryLimit sets the optional memory_limit attribute to value.
-// If not specified, defaults to 0
+// Inserts a dimension of 1 into a tensor's shape.
//
-// REQUIRES: value >= 0
-func OrderedMapUnstageMemoryLimit(value int64) OrderedMapUnstageAttr {
- return func(m optionalAttr) {
- m["memory_limit"] = value
+// Given a tensor `input`, this operation inserts a dimension of 1 at the
+// dimension index `axis` of `input`'s shape. The dimension index `axis` starts at
+// zero; if you specify a negative number for `axis` it is counted backward from
+// the end.
+//
+// This operation is useful if you want to add a batch dimension to a single
+// element. For example, if you have a single image of shape `[height, width,
+// channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
+// which will make the shape `[1, height, width, channels]`.
+//
+// Other examples:
+//
+// ```
+// # 't' is a tensor of shape [2]
+// shape(expand_dims(t, 0)) ==> [1, 2]
+// shape(expand_dims(t, 1)) ==> [2, 1]
+// shape(expand_dims(t, -1)) ==> [2, 1]
+//
+// # 't2' is a tensor of shape [2, 3, 5]
+// shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
+// shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
+// shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
+// ```
+//
+// This operation requires that:
+//
+// `-1 - input.dims() <= axis <= input.dims()`
+//
+// This operation is related to `squeeze()`, which removes dimensions of
+// size 1.
+//
+// Arguments:
+//
+// axis: 0-D (scalar). Specifies the dimension index at which to
+// expand the shape of `input`. Must be in the range
+// `[-rank(input) - 1, rank(input)]`.
+//
+// Returns Contains the same data as `input`, but its shape has an additional
+// dimension of size 1 added.
+func ExpandDims(scope *Scope, input tf.Output, axis tf.Output) (output tf.Output) {
+ if scope.Err() != nil {
+ return
}
-}
-
-// OrderedMapUnstageContainer sets the optional container attribute to value.
-// If not specified, defaults to ""
-func OrderedMapUnstageContainer(value string) OrderedMapUnstageAttr {
- return func(m optionalAttr) {
- m["container"] = value
+ opspec := tf.OpSpec{
+ Type: "ExpandDims",
+ Input: []tf.Input{
+ input, axis,
+ },
}
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
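+
+// A minimal sketch (editorial addition): adding a batch dimension to a single
+// image, mirroring the `expand_dims(image, 0)` example above.
+//
+// ```
+// s := op.NewScope()
+// image := op.Placeholder(s, tf.Float) // assumed [height, width, channels]
+// batched := op.ExpandDims(s, image, op.Const(s, int32(0)))
+// ```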
-// OrderedMapUnstageSharedName sets the optional shared_name attribute to value.
-// If not specified, defaults to ""
-func OrderedMapUnstageSharedName(value string) OrderedMapUnstageAttr {
+// MatrixInverseAttr is an optional argument to MatrixInverse.
+type MatrixInverseAttr func(optionalAttr)
+
+// MatrixInverseAdjoint sets the optional adjoint attribute to value.
+// If not specified, defaults to false
+func MatrixInverseAdjoint(value bool) MatrixInverseAttr {
return func(m optionalAttr) {
- m["shared_name"] = value
+ m["adjoint"] = value
}
}
-// Op removes and returns the values associated with the key
+// Computes the inverse of one or more square invertible matrices or their
//
-// from the underlying container. If the underlying container
-// does not contain this key, the op will block until it does.
-func OrderedMapUnstage(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, optional ...OrderedMapUnstageAttr) (values []tf.Output) {
+// adjoints (conjugate transposes).
+//
+// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
+// form square matrices. The output is a tensor of the same shape as the input
+// containing the inverse for all input submatrices `[..., :, :]`.
+//
+// The op uses LU decomposition with partial pivoting to compute the inverses.
+//
+// If a matrix is not invertible there is no guarantee what the op does. It
+// may detect the condition and raise an exception or it may simply return a
+// garbage result.
+//
+// Arguments:
+// input: Shape is `[..., M, M]`.
+//
+// Returns Shape is `[..., M, M]`.
+//
+// @compatibility(numpy)
+// Equivalent to np.linalg.inv
+// @end_compatibility
+func MatrixInverse(scope *Scope, input tf.Output, optional ...MatrixInverseAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dtypes": dtypes}
+ attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "OrderedMapUnstage",
+ Type: "MatrixInverse",
Input: []tf.Input{
- key, indices,
+ input,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- if scope.Err() != nil {
- return
- }
- var idx int
- var err error
- if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
- scope.UpdateErr("OrderedMapUnstage", err)
- return
- }
- return values
+ return op.Output(0)
}
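+
+// A minimal sketch (editorial addition): inverting one 2x2 matrix, with the
+// optional adjoint variant shown for illustration.
+//
+// ```
+// s := op.NewScope()
+// m := op.Const(s, [][]float32{{1, 2}, {3, 4}}) // det = -2, so invertible
+// inv := op.MatrixInverse(s, m)
+// adjInv := op.MatrixInverse(s, m, op.MatrixInverseAdjoint(true))
+// ```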
-// MapIncompleteSizeAttr is an optional argument to MapIncompleteSize.
-type MapIncompleteSizeAttr func(optionalAttr)
-
-// MapIncompleteSizeCapacity sets the optional capacity attribute to value.
-// If not specified, defaults to 0
+// Computes square of x element-wise.
//
-// REQUIRES: value >= 0
-func MapIncompleteSizeCapacity(value int64) MapIncompleteSizeAttr {
- return func(m optionalAttr) {
- m["capacity"] = value
+// I.e., \\(y = x * x = x^2\\).
+func Square(scope *Scope, x tf.Output) (y tf.Output) {
+ if scope.Err() != nil {
+ return
}
-}
-
-// MapIncompleteSizeMemoryLimit sets the optional memory_limit attribute to value.
-// If not specified, defaults to 0
-//
-// REQUIRES: value >= 0
-func MapIncompleteSizeMemoryLimit(value int64) MapIncompleteSizeAttr {
- return func(m optionalAttr) {
- m["memory_limit"] = value
+ opspec := tf.OpSpec{
+ Type: "Square",
+ Input: []tf.Input{
+ x,
+ },
}
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// MapIncompleteSizeContainer sets the optional container attribute to value.
-// If not specified, defaults to ""
-func MapIncompleteSizeContainer(value string) MapIncompleteSizeAttr {
- return func(m optionalAttr) {
- m["container"] = value
+// Computes exponential linear: `exp(features) - 1` if < 0, `features` otherwise.
+//
+// See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
+// ](http://arxiv.org/abs/1511.07289)
+func Elu(scope *Scope, features tf.Output) (activations tf.Output) {
+ if scope.Err() != nil {
+ return
}
-}
-
-// MapIncompleteSizeSharedName sets the optional shared_name attribute to value.
-// If not specified, defaults to ""
-func MapIncompleteSizeSharedName(value string) MapIncompleteSizeAttr {
- return func(m optionalAttr) {
- m["shared_name"] = value
+ opspec := tf.OpSpec{
+ Type: "Elu",
+ Input: []tf.Input{
+ features,
+ },
}
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// Op returns the number of incomplete elements in the underlying container.
-func MapIncompleteSize(scope *Scope, dtypes []tf.DataType, optional ...MapIncompleteSizeAttr) (size tf.Output) {
+// Computes the reciprocal of x element-wise.
+//
+// I.e., \\(y = 1 / x\\).
+func Reciprocal(scope *Scope, x tf.Output) (y tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dtypes": dtypes}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "MapIncompleteSize",
-
- Attrs: attrs,
+ Type: "Reciprocal",
+ Input: []tf.Input{
+ x,
+ },
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// MapSizeAttr is an optional argument to MapSize.
-type MapSizeAttr func(optionalAttr)
+// OrderedMapClearAttr is an optional argument to OrderedMapClear.
+type OrderedMapClearAttr func(optionalAttr)
-// MapSizeCapacity sets the optional capacity attribute to value.
+// OrderedMapClearCapacity sets the optional capacity attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
-func MapSizeCapacity(value int64) MapSizeAttr {
+func OrderedMapClearCapacity(value int64) OrderedMapClearAttr {
return func(m optionalAttr) {
m["capacity"] = value
}
}
-// MapSizeMemoryLimit sets the optional memory_limit attribute to value.
+// OrderedMapClearMemoryLimit sets the optional memory_limit attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
-func MapSizeMemoryLimit(value int64) MapSizeAttr {
+func OrderedMapClearMemoryLimit(value int64) OrderedMapClearAttr {
return func(m optionalAttr) {
m["memory_limit"] = value
}
}
-// MapSizeContainer sets the optional container attribute to value.
+// OrderedMapClearContainer sets the optional container attribute to value.
// If not specified, defaults to ""
-func MapSizeContainer(value string) MapSizeAttr {
+func OrderedMapClearContainer(value string) OrderedMapClearAttr {
return func(m optionalAttr) {
m["container"] = value
}
}
-// MapSizeSharedName sets the optional shared_name attribute to value.
+// OrderedMapClearSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
-func MapSizeSharedName(value string) MapSizeAttr {
+func OrderedMapClearSharedName(value string) OrderedMapClearAttr {
return func(m optionalAttr) {
m["shared_name"] = value
}
}
-// Op returns the number of elements in the underlying container.
-func MapSize(scope *Scope, dtypes []tf.DataType, optional ...MapSizeAttr) (size tf.Output) {
+// Op removes all elements in the underlying container.
+//
+// Returns the created operation.
+func OrderedMapClear(scope *Scope, dtypes []tf.DataType, optional ...OrderedMapClearAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
@@ -3410,515 +2873,512 @@ func MapSize(scope *Scope, dtypes []tf.DataType, optional ...MapSizeAttr) (size
a(attrs)
}
opspec := tf.OpSpec{
- Type: "MapSize",
+ Type: "OrderedMapClear",
Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
-// MapUnstageAttr is an optional argument to MapUnstage.
-type MapUnstageAttr func(optionalAttr)
-
-// MapUnstageCapacity sets the optional capacity attribute to value.
-// If not specified, defaults to 0
+// Computes the reciprocal of x element-wise.
//
-// REQUIRES: value >= 0
-func MapUnstageCapacity(value int64) MapUnstageAttr {
- return func(m optionalAttr) {
- m["capacity"] = value
+// I.e., \\(y = 1 / x\\).
+func Inv(scope *Scope, x tf.Output) (y tf.Output) {
+ if scope.Err() != nil {
+ return
}
-}
-
-// MapUnstageMemoryLimit sets the optional memory_limit attribute to value.
-// If not specified, defaults to 0
-//
-// REQUIRES: value >= 0
-func MapUnstageMemoryLimit(value int64) MapUnstageAttr {
- return func(m optionalAttr) {
- m["memory_limit"] = value
+ opspec := tf.OpSpec{
+ Type: "Inv",
+ Input: []tf.Input{
+ x,
+ },
}
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// MapUnstageContainer sets the optional container attribute to value.
-// If not specified, defaults to ""
-func MapUnstageContainer(value string) MapUnstageAttr {
- return func(m optionalAttr) {
- m["container"] = value
- }
-}
+// ComplexAbsAttr is an optional argument to ComplexAbs.
+type ComplexAbsAttr func(optionalAttr)
-// MapUnstageSharedName sets the optional shared_name attribute to value.
-// If not specified, defaults to ""
-func MapUnstageSharedName(value string) MapUnstageAttr {
+// ComplexAbsTout sets the optional Tout attribute to value.
+// If not specified, defaults to DT_FLOAT
+func ComplexAbsTout(value tf.DataType) ComplexAbsAttr {
return func(m optionalAttr) {
- m["shared_name"] = value
+ m["Tout"] = value
}
}
-// Op removes and returns the values associated with the key
+// Computes the complex absolute value of a tensor.
//
-// from the underlying container. If the underlying container
-// does not contain this key, the op will block until it does.
-func MapUnstage(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, optional ...MapUnstageAttr) (values []tf.Output) {
+// Given a tensor `x` of complex numbers, this operation returns a tensor of type
+// `float` or `double` that is the absolute value of each element in `x`. All
+// elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute
+// value is computed as \\( \sqrt{a^2 + b^2}\\).
+func ComplexAbs(scope *Scope, x tf.Output, optional ...ComplexAbsAttr) (y tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dtypes": dtypes}
+ attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "MapUnstage",
+ Type: "ComplexAbs",
Input: []tf.Input{
- key, indices,
+ x,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
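+
+// A minimal sketch (editorial addition): \\(|3 + 4j| = 5\\); `Tout` already
+// defaults to DT_FLOAT, so the option is shown only for illustration.
+//
+// ```
+// s := op.NewScope()
+// x := op.Const(s, []complex64{3 + 4i})
+// y := op.ComplexAbs(s, x, op.ComplexAbsTout(tf.Float)) // [5]
+// ```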
+
+// Returns the truth value of x AND y element-wise.
+//
+// *NOTE*: `LogicalAnd` supports broadcasting. More about broadcasting
+// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+func LogicalAnd(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
if scope.Err() != nil {
return
}
- var idx int
- var err error
- if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
- scope.UpdateErr("MapUnstage", err)
- return
+ opspec := tf.OpSpec{
+ Type: "LogicalAnd",
+ Input: []tf.Input{
+ x, y,
+ },
}
- return values
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// Forwards the value of an available tensor from `inputs` to `output`.
-//
-// `Merge` waits for at least one of the tensors in `inputs` to become available.
-// It is usually combined with `Switch` to implement branching.
-//
-// `Merge` forwards the first tensor to become available to `output`, and sets
-// `value_index` to its index in `inputs`.
-//
-// Arguments:
-// inputs: The input tensors, exactly one of which will become available.
-//
-// Returns Will be set to the available input tensor. The index of the chosen input tensor in `inputs`.
-func Merge(scope *Scope, inputs []tf.Output) (output tf.Output, value_index tf.Output) {
+// Casts x of type SrcT to y of type DstT.
+func Cast(scope *Scope, x tf.Output, DstT tf.DataType) (y tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"DstT": DstT}
opspec := tf.OpSpec{
- Type: "Merge",
+ Type: "Cast",
Input: []tf.Input{
- tf.OutputList(inputs),
+ x,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1)
+ return op.Output(0)
}
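+
+// A minimal sketch (editorial addition): casting int32 values to float.
+//
+// ```
+// s := op.NewScope()
+// y := op.Cast(s, op.Const(s, []int32{1, 2, 3}), tf.Float)
+// ```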
-// MapPeekAttr is an optional argument to MapPeek.
-type MapPeekAttr func(optionalAttr)
+// MaxAttr is an optional argument to Max.
+type MaxAttr func(optionalAttr)
-// MapPeekCapacity sets the optional capacity attribute to value.
-// If not specified, defaults to 0
+// MaxKeepDims sets the optional keep_dims attribute to value.
//
-// REQUIRES: value >= 0
-func MapPeekCapacity(value int64) MapPeekAttr {
+// value: If true, retain reduced dimensions with length 1.
+// If not specified, defaults to false
+func MaxKeepDims(value bool) MaxAttr {
return func(m optionalAttr) {
- m["capacity"] = value
+ m["keep_dims"] = value
}
}
-// MapPeekMemoryLimit sets the optional memory_limit attribute to value.
-// If not specified, defaults to 0
+// Computes the maximum of elements across dimensions of a tensor.
//
-// REQUIRES: value >= 0
-func MapPeekMemoryLimit(value int64) MapPeekAttr {
- return func(m optionalAttr) {
- m["memory_limit"] = value
- }
-}
-
-// MapPeekContainer sets the optional container attribute to value.
-// If not specified, defaults to ""
-func MapPeekContainer(value string) MapPeekAttr {
- return func(m optionalAttr) {
- m["container"] = value
- }
-}
-
-// MapPeekSharedName sets the optional shared_name attribute to value.
-// If not specified, defaults to ""
-func MapPeekSharedName(value string) MapPeekAttr {
- return func(m optionalAttr) {
- m["shared_name"] = value
- }
-}
-
-// Op peeks at the values at the specified key. If the
+// Reduces `input` along the dimensions given in `axis`. Unless
+// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+// `axis`. If `keep_dims` is true, the reduced dimensions are
+// retained with length 1.
//
-// underlying container does not contain this key,
-// this op will block until it does.
-func MapPeek(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, optional ...MapPeekAttr) (values []tf.Output) {
+// Arguments:
+// input: The tensor to reduce.
+// axis: The dimensions to reduce. Must be in the range
+// `[-rank(input), rank(input))`.
+//
+// Returns The reduced tensor.
+func Max(scope *Scope, input tf.Output, axis tf.Output, optional ...MaxAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dtypes": dtypes}
+ attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "MapPeek",
+ Type: "Max",
Input: []tf.Input{
- key, indices,
+ input, axis,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- if scope.Err() != nil {
- return
- }
- var idx int
- var err error
- if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
- scope.UpdateErr("MapPeek", err)
- return
- }
- return values
+ return op.Output(0)
}
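+
+// A minimal sketch (editorial addition): reducing over axis 0 while keeping
+// the reduced dimension.
+//
+// ```
+// s := op.NewScope()
+// m := op.Const(s, [][]float32{{1, 9}, {4, 2}})
+// mx := op.Max(s, m, op.Const(s, int32(0)), op.MaxKeepDims(true)) // [[4, 9]]
+// ```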
-// MapStageAttr is an optional argument to MapStage.
-type MapStageAttr func(optionalAttr)
-
-// MapStageCapacity sets the optional capacity attribute to value.
+// Quantized Batch normalization.
//
-// value: Maximum number of elements in the Staging Area. If > 0, inserts
-// on the container will block when the capacity is reached.
-// If not specified, defaults to 0
+// This op is deprecated and will be removed in the future. Prefer
+// `tf.nn.batch_normalization`.
//
-// REQUIRES: value >= 0
-func MapStageCapacity(value int64) MapStageAttr {
- return func(m optionalAttr) {
- m["capacity"] = value
- }
-}
-
-// MapStageMemoryLimit sets the optional memory_limit attribute to value.
-// If not specified, defaults to 0
+// Arguments:
+// t: A 4D input Tensor.
+// t_min: The value represented by the lowest quantized input.
+// t_max: The value represented by the highest quantized input.
+// m: A 1D mean Tensor with size matching the last dimension of t.
+// This is the first output from tf.nn.moments,
+// or a saved moving average thereof.
+// m_min: The value represented by the lowest quantized mean.
+// m_max: The value represented by the highest quantized mean.
+// v: A 1D variance Tensor with size matching the last dimension of t.
+// This is the second output from tf.nn.moments,
+// or a saved moving average thereof.
+// v_min: The value represented by the lowest quantized variance.
+// v_max: The value represented by the highest quantized variance.
+// beta: A 1D beta Tensor with size matching the last dimension of t.
+// An offset to be added to the normalized tensor.
+// beta_min: The value represented by the lowest quantized offset.
+// beta_max: The value represented by the highest quantized offset.
+// gamma: A 1D gamma Tensor with size matching the last dimension of t.
+// If "scale_after_normalization" is true, this tensor will be multiplied
+// with the normalized tensor.
+// gamma_min: The value represented by the lowest quantized gamma.
+// gamma_max: The value represented by the highest quantized gamma.
//
-// REQUIRES: value >= 0
-func MapStageMemoryLimit(value int64) MapStageAttr {
- return func(m optionalAttr) {
- m["memory_limit"] = value
+// variance_epsilon: A small float number to avoid dividing by 0.
+// scale_after_normalization: A bool indicating whether the resulting tensor
+// needs to be multiplied by gamma.
+func QuantizedBatchNormWithGlobalNormalization(scope *Scope, t tf.Output, t_min tf.Output, t_max tf.Output, m tf.Output, m_min tf.Output, m_max tf.Output, v tf.Output, v_min tf.Output, v_max tf.Output, beta tf.Output, beta_min tf.Output, beta_max tf.Output, gamma tf.Output, gamma_min tf.Output, gamma_max tf.Output, out_type tf.DataType, variance_epsilon float32, scale_after_normalization bool) (result tf.Output, result_min tf.Output, result_max tf.Output) {
+ if scope.Err() != nil {
+ return
}
-}
-
-// MapStageContainer sets the optional container attribute to value.
-//
-// value: If non-empty, this queue is placed in the given container. Otherwise,
-// a default container is used.
-// If not specified, defaults to ""
-func MapStageContainer(value string) MapStageAttr {
- return func(m optionalAttr) {
- m["container"] = value
+ attrs := map[string]interface{}{"out_type": out_type, "variance_epsilon": variance_epsilon, "scale_after_normalization": scale_after_normalization}
+ opspec := tf.OpSpec{
+ Type: "QuantizedBatchNormWithGlobalNormalization",
+ Input: []tf.Input{
+ t, t_min, t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max,
+ },
+ Attrs: attrs,
}
+ op := scope.AddOperation(opspec)
+ return op.Output(0), op.Output(1), op.Output(2)
}
-// MapStageSharedName sets the optional shared_name attribute to value.
-//
-// value: It is necessary to match this name to the matching Unstage Op.
-// If not specified, defaults to ""
-func MapStageSharedName(value string) MapStageAttr {
+// HistogramFixedWidthAttr is an optional argument to HistogramFixedWidth.
+type HistogramFixedWidthAttr func(optionalAttr)
+
+// HistogramFixedWidthDtype sets the optional dtype attribute to value.
+// If not specified, defaults to DT_INT32
+func HistogramFixedWidthDtype(value tf.DataType) HistogramFixedWidthAttr {
return func(m optionalAttr) {
- m["shared_name"] = value
+ m["dtype"] = value
}
}
-// Stage (key, values) in the underlying container which behaves like a hashtable.
+// Return a histogram of values.
//
-// Arguments:
-// key: int64
+// Given the tensor `values`, this operation returns a rank 1 histogram counting
+// the number of entries in `values` that fall into every bin. The bins are
+// equal width and determined by the arguments `value_range` and `nbins`.
//
-// values: a list of tensors
-// dtypes: A list of data types that inserted values should adhere to.
+// ```python
+// # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
+// nbins = 5
+// value_range = [0.0, 5.0]
+// new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
//
+// with tf.get_default_session() as sess:
+// hist = tf.histogram_fixed_width(new_values, value_range, nbins=5)
+// variables.global_variables_initializer().run()
+// sess.run(hist) => [2, 1, 1, 0, 2]
+// ```
//
-// Returns the created operation.
-func MapStage(scope *Scope, key tf.Output, indices tf.Output, values []tf.Output, dtypes []tf.DataType, optional ...MapStageAttr) (o *tf.Operation) {
+// Arguments:
+// values: Numeric `Tensor`.
+// value_range: Shape [2] `Tensor` of same `dtype` as `values`.
+// values <= value_range[0] will be mapped to hist[0],
+// values >= value_range[1] will be mapped to hist[-1].
+// nbins: Scalar `int32 Tensor`. Number of histogram bins.
+//
+// Returns A 1-D `Tensor` holding histogram of values.
+func HistogramFixedWidth(scope *Scope, values tf.Output, value_range tf.Output, nbins tf.Output, optional ...HistogramFixedWidthAttr) (out tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dtypes": dtypes}
+ attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "MapStage",
+ Type: "HistogramFixedWidth",
Input: []tf.Input{
- key, indices, tf.OutputList(values),
+ values, value_range, nbins,
},
Attrs: attrs,
}
- return scope.AddOperation(opspec)
-}
-
-// DepthToSpaceAttr is an optional argument to DepthToSpace.
-type DepthToSpaceAttr func(optionalAttr)
-
-// DepthToSpaceDataFormat sets the optional data_format attribute to value.
-// If not specified, defaults to "NHWC"
-func DepthToSpaceDataFormat(value string) DepthToSpaceAttr {
- return func(m optionalAttr) {
- m["data_format"] = value
- }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
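+
+// A minimal sketch (editorial addition) mirroring the Python example above.
+//
+// ```
+// s := op.NewScope()
+// vals := op.Const(s, []float32{-1.0, 0.0, 1.5, 2.0, 5.0, 15})
+// rng := op.Const(s, []float32{0.0, 5.0})
+// hist := op.HistogramFixedWidth(s, vals, rng, op.Const(s, int32(5))) // [2, 1, 1, 0, 2]
+// ```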
-// DepthToSpace for tensors of type T.
-//
-// Rearranges data from depth into blocks of spatial data.
-// This is the reverse transformation of SpaceToDepth. More specifically,
-// this op outputs a copy of the input tensor where values from the `depth`
-// dimension are moved in spatial blocks to the `height` and `width` dimensions.
-// The attr `block_size` indicates the input block size and how the data is moved.
-//
-// * Chunks of data of size `block_size * block_size` from depth are rearranged
-// into non-overlapping blocks of size `block_size x block_size`
-// * The width of the output tensor is `input_depth * block_size`, whereas the
-// height is `input_height * block_size`.
-// * The Y, X coordinates within each block of the output image are determined
-// by the high order component of the input channel index.
-// * The depth of the input tensor must be divisible by
-// `block_size * block_size`.
-//
-// The `data_format` attr specifies the layout of the input and output tensors
-// with the following options:
-// "NHWC": `[ batch, height, width, channels ]`
-// "NCHW": `[ batch, channels, height, width ]`
-// "NCHW_VECT_C":
-// `qint8 [ batch, channels / 4, height, width, 4 ]`
-//
-// It is useful to consider the operation as transforming a 6-D Tensor.
-// e.g. for data_format = NHWC,
-// Each element in the input tensor can be specified via 6 coordinates,
-// ordered by decreasing memory layout significance as:
-// n,iY,iX,bY,bX,oC (where n=batch index, iX, iY means X or Y coordinates
-// within the input image, bX, bY means coordinates
-// within the output block, oC means output channels).
-// The output would be the input transposed to the following layout:
-// n,iY,bY,iX,bX,oC
-//
-// This operation is useful for resizing the activations between convolutions
-// (but keeping all data), e.g. instead of pooling. It is also useful for training
-// purely convolutional models.
-//
-// For example, given an input of shape `[1, 1, 1, 4]`, data_format = "NHWC" and
-// block_size = 2:
-//
-// ```
-// x = [[[[1, 2, 3, 4]]]]
-//
-// ```
-//
-// This operation will output a tensor of shape `[1, 2, 2, 1]`:
-//
-// ```
-// [[[[1], [2]],
-// [[3], [4]]]]
-// ```
+// Creates a summary database writer accessible by the given resource handle.
//
-// Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`,
-// the corresponding output will have 2x2 elements and will have a depth of
-// 1 channel (1 = `4 / (block_size * block_size)`).
-// The output element shape is `[2, 2, 1]`.
+// This can be used to write tensors from the execution graph directly
+// to a database. Only SQLite is supported right now. This function
+// will create the schema if it doesn't exist. Entries in the Users,
+// Experiments, and Runs tables will be created automatically if they
+// don't already exist.
//
-// For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g.
+// Arguments:
+// writer: Handle to SummaryWriter resource to overwrite.
+// db_uri: For example "file:/tmp/foo.sqlite".
+// experiment_name: Can't contain ASCII control characters or <>. Case
+// sensitive. If empty, then the Run will not be associated with any
+// Experiment.
+// run_name: Can't contain ASCII control characters or <>. Case sensitive.
+// If empty, then each Tag will not be associated with any Run.
+// user_name: Must be valid as both a DNS label and Linux username. If
+// empty, then the Experiment will not be associated with any User.
//
-// ```
-// x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
-// ```
+// Returns the created operation.
+func CreateSummaryDbWriter(scope *Scope, writer tf.Output, db_uri tf.Output, experiment_name tf.Output, run_name tf.Output, user_name tf.Output) (o *tf.Operation) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "CreateSummaryDbWriter",
+ Input: []tf.Input{
+ writer, db_uri, experiment_name, run_name, user_name,
+ },
+ }
+ return scope.AddOperation(opspec)
+}
+
+// Adds Tensor 'bias' to Tensor 'input' for Quantized types.
//
-// This operation, for block size of 2, will return the following tensor of shape
-// `[1, 2, 2, 3]`
+// Broadcasts the values of bias on dimensions 0..N-2 of 'input'.
//
-// ```
-// [[[[1, 2, 3], [4, 5, 6]],
-// [[7, 8, 9], [10, 11, 12]]]]
+// Arguments:
//
-// ```
+// bias: A 1D bias Tensor with size matching the last dimension of 'input'.
+// min_input: The float value that the lowest quantized input value represents.
+// max_input: The float value that the highest quantized input value represents.
+// min_bias: The float value that the lowest quantized bias value represents.
+// max_bias: The float value that the highest quantized bias value represents.
//
-// Similarly, for the following input of shape `[1 2 2 4]`, and a block size of 2:
//
-// ```
-// x = [[[[1, 2, 3, 4],
-// [5, 6, 7, 8]],
-// [[9, 10, 11, 12],
-// [13, 14, 15, 16]]]]
-// ```
+// Returns The float value that the lowest quantized output value represents. The float value that the highest quantized output value represents.
+func QuantizedBiasAdd(scope *Scope, input tf.Output, bias tf.Output, min_input tf.Output, max_input tf.Output, min_bias tf.Output, max_bias tf.Output, out_type tf.DataType) (output tf.Output, min_out tf.Output, max_out tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{"out_type": out_type}
+ opspec := tf.OpSpec{
+ Type: "QuantizedBiasAdd",
+ Input: []tf.Input{
+ input, bias, min_input, max_input, min_bias, max_bias,
+ },
+ Attrs: attrs,
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0), op.Output(1), op.Output(2)
+}
+
+// Produces the average pool of the input tensor for quantized types.
//
-// the operator will return the following tensor of shape `[1 4 4 1]`:
+// Arguments:
+// input: 4-D with shape `[batch, height, width, channels]`.
+// min_input: The float value that the lowest quantized input value represents.
+// max_input: The float value that the highest quantized input value represents.
+// ksize: The size of the window for each dimension of the input tensor.
+// The length must be 4 to match the number of dimensions of the input.
+// strides: The stride of the sliding window for each dimension of the input
+// tensor. The length must be 4 to match the number of dimensions of the input.
+// padding: The type of padding algorithm to use.
//
-// ```
-// x = [[[ [1], [2], [5], [6]],
-// [ [3], [4], [7], [8]],
-// [ [9], [10], [13], [14]],
-// [ [11], [12], [15], [16]]]]
+// Returns The float value that the lowest quantized output value represents. The float value that the highest quantized output value represents.
+func QuantizedAvgPool(scope *Scope, input tf.Output, min_input tf.Output, max_input tf.Output, ksize []int64, strides []int64, padding string) (output tf.Output, min_output tf.Output, max_output tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
+ opspec := tf.OpSpec{
+ Type: "QuantizedAvgPool",
+ Input: []tf.Input{
+ input, min_input, max_input,
+ },
+ Attrs: attrs,
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0), op.Output(1), op.Output(2)
+}
+
+// Updates the table to associate keys with values.
//
-// ```
+// The tensor `keys` must be of the same type as the keys of the table.
+// The tensor `values` must be of the type of the table values.
//
// Arguments:
+// table_handle: Handle to the table.
+// keys: Any shape. Keys to look up.
+// values: Values to associate with keys.
//
-// block_size: The size of the spatial block, same as in Space2Depth.
-func DepthToSpace(scope *Scope, input tf.Output, block_size int64, optional ...DepthToSpaceAttr) (output tf.Output) {
+// Returns the created operation.
+func LookupTableInsertV2(scope *Scope, table_handle tf.Output, keys tf.Output, values tf.Output) (o *tf.Operation) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"block_size": block_size}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "DepthToSpace",
+ Type: "LookupTableInsertV2",
Input: []tf.Input{
- input,
+ table_handle, keys, values,
},
- Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
-// StagePeekAttr is an optional argument to StagePeek.
-type StagePeekAttr func(optionalAttr)
+// FractionalAvgPoolAttr is an optional argument to FractionalAvgPool.
+type FractionalAvgPoolAttr func(optionalAttr)
-// StagePeekCapacity sets the optional capacity attribute to value.
-// If not specified, defaults to 0
+// FractionalAvgPoolPseudoRandom sets the optional pseudo_random attribute to value.
//
-// REQUIRES: value >= 0
-func StagePeekCapacity(value int64) StagePeekAttr {
+// value: When set to True, generates the pooling sequence in a
+// pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin
+// Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for
+// the difference between pseudorandom and random.
+// If not specified, defaults to false
+func FractionalAvgPoolPseudoRandom(value bool) FractionalAvgPoolAttr {
return func(m optionalAttr) {
- m["capacity"] = value
+ m["pseudo_random"] = value
}
}
-// StagePeekMemoryLimit sets the optional memory_limit attribute to value.
-// If not specified, defaults to 0
+// FractionalAvgPoolOverlapping sets the optional overlapping attribute to value.
//
-// REQUIRES: value >= 0
-func StagePeekMemoryLimit(value int64) StagePeekAttr {
+// value: When set to True, the values at the boundary of adjacent pooling
+// cells are used by both cells when pooling. For example:
+//
+// `index 0 1 2 3 4`
+//
+// `value 20 5 16 3 7`
+//
+// If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.
+// The result would be [41/3, 26/3] for fractional avg pooling.
+// If not specified, defaults to false
+func FractionalAvgPoolOverlapping(value bool) FractionalAvgPoolAttr {
return func(m optionalAttr) {
- m["memory_limit"] = value
+ m["overlapping"] = value
}
}
-// StagePeekContainer sets the optional container attribute to value.
-// If not specified, defaults to ""
-func StagePeekContainer(value string) StagePeekAttr {
+// FractionalAvgPoolDeterministic sets the optional deterministic attribute to value.
+//
+// value: When set to True, a fixed pooling region will be used when
+// iterating over a FractionalAvgPool node in the computation graph. Mainly used
+// in unit test to make FractionalAvgPool deterministic.
+// If not specified, defaults to false
+func FractionalAvgPoolDeterministic(value bool) FractionalAvgPoolAttr {
return func(m optionalAttr) {
- m["container"] = value
+ m["deterministic"] = value
}
}
-// StagePeekSharedName sets the optional shared_name attribute to value.
-// If not specified, defaults to ""
-func StagePeekSharedName(value string) StagePeekAttr {
+// FractionalAvgPoolSeed sets the optional seed attribute to value.
+//
+// value: If either seed or seed2 are set to be non-zero, the random number
+// generator is seeded by the given seed. Otherwise, it is seeded by a
+// random seed.
+// If not specified, defaults to 0
+func FractionalAvgPoolSeed(value int64) FractionalAvgPoolAttr {
return func(m optionalAttr) {
- m["shared_name"] = value
+ m["seed"] = value
}
}
-// Op peeks at the values at the specified index. If the
+// FractionalAvgPoolSeed2 sets the optional seed2 attribute to value.
//
-// underlying container does not contain sufficient elements,
-// this op will block until it does. This Op is optimized for
-// performance.
-func StagePeek(scope *Scope, index tf.Output, dtypes []tf.DataType, optional ...StagePeekAttr) (values []tf.Output) {
+// value: A second seed to avoid seed collision.
+// If not specified, defaults to 0
+func FractionalAvgPoolSeed2(value int64) FractionalAvgPoolAttr {
+ return func(m optionalAttr) {
+ m["seed2"] = value
+ }
+}
+
+// Performs fractional average pooling on the input.
+//
+// Fractional average pooling is similar to Fractional max pooling in the pooling
+// region generation step. The only difference is that after pooling regions are
+// generated, a mean operation is performed instead of a max operation in each
+// pooling region.
+//
+// Arguments:
+// value: 4-D with shape `[batch, height, width, channels]`.
+// pooling_ratio: Pooling ratio for each dimension of `value`, currently only
+// supports row and col dimension and should be >= 1.0. For example, a valid
+// pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements
+// must be 1.0 because we don't allow pooling on batch and channels
+// dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions
+// respectively.
+//
+// Returns output tensor after fractional avg pooling. Row pooling sequence, needed to calculate gradient. Column pooling sequence, needed to calculate gradient.
+func FractionalAvgPool(scope *Scope, value tf.Output, pooling_ratio []float32, optional ...FractionalAvgPoolAttr) (output tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dtypes": dtypes}
+ attrs := map[string]interface{}{"pooling_ratio": pooling_ratio}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "StagePeek",
+ Type: "FractionalAvgPool",
Input: []tf.Input{
- index,
+ value,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- if scope.Err() != nil {
- return
- }
- var idx int
- var err error
- if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
- scope.UpdateErr("StagePeek", err)
- return
- }
- return values
+ return op.Output(0), op.Output(1), op.Output(2)
}
-// StageAttr is an optional argument to Stage.
-type StageAttr func(optionalAttr)
+// RandomCropAttr is an optional argument to RandomCrop.
+type RandomCropAttr func(optionalAttr)
-// StageCapacity sets the optional capacity attribute to value.
+// RandomCropSeed sets the optional seed attribute to value.
//
-// value: Maximum number of elements in the Staging Area. If > 0, inserts
-// on the container will block when the capacity is reached.
+// value: If either seed or seed2 are set to be non-zero, the random number
+// generator is seeded by the given seed. Otherwise, it is seeded by a
+// random seed.
// If not specified, defaults to 0
-//
-// REQUIRES: value >= 0
-func StageCapacity(value int64) StageAttr {
+func RandomCropSeed(value int64) RandomCropAttr {
return func(m optionalAttr) {
- m["capacity"] = value
+ m["seed"] = value
}
}
-// StageMemoryLimit sets the optional memory_limit attribute to value.
+// RandomCropSeed2 sets the optional seed2 attribute to value.
//
-// value: The maximum number of bytes allowed for Tensors in the Staging Area.
-// If > 0, inserts will block until sufficient space is available.
+// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
-//
-// REQUIRES: value >= 0
-func StageMemoryLimit(value int64) StageAttr {
+func RandomCropSeed2(value int64) RandomCropAttr {
return func(m optionalAttr) {
- m["memory_limit"] = value
+ m["seed2"] = value
}
}
-// StageContainer sets the optional container attribute to value.
+// Randomly crop `image`.
//
-// value: If non-empty, this queue is placed in the given container. Otherwise,
-// a default container is used.
-// If not specified, defaults to ""
-func StageContainer(value string) StageAttr {
- return func(m optionalAttr) {
- m["container"] = value
- }
-}
-
-// StageSharedName sets the optional shared_name attribute to value.
+// DEPRECATED at GraphDef version 8: Random crop is now pure Python
//
-// value: It is necessary to match this name to the matching Unstage Op.
-// If not specified, defaults to ""
-func StageSharedName(value string) StageAttr {
- return func(m optionalAttr) {
- m["shared_name"] = value
- }
-}
-
-// Stage values similar to a lightweight Enqueue.
+// `size` is a 1-D int64 tensor with 2 elements representing the crop height and
+// width. The values must be non-negative.
//
-// The basic functionality of this Op is similar to a queue with many
-// fewer capabilities and options. This Op is optimized for performance.
+// This Op picks a random location in `image` and crops a `height` by `width`
+// rectangle from that location. The random location is picked so the cropped
+// area will fit inside the original image.
//
// Arguments:
-// values: a list of tensors
-// dtypes: A list of data types that inserted values should adhere to.
+// image: 3-D of shape `[height, width, channels]`.
+// size: 1-D of length 2 containing: `crop_height`, `crop_width`.
//
-// Returns the created operation.
-func Stage(scope *Scope, values []tf.Output, optional ...StageAttr) (o *tf.Operation) {
+// Returns 3-D of shape `[crop_height, crop_width, channels]`.
+func RandomCrop(scope *Scope, image tf.Output, size tf.Output, optional ...RandomCropAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
@@ -3927,60 +3387,50 @@ func Stage(scope *Scope, values []tf.Output, optional ...StageAttr) (o *tf.Opera
a(attrs)
}
opspec := tf.OpSpec{
- Type: "Stage",
+ Type: "RandomCrop",
Input: []tf.Input{
- tf.OutputList(values),
+ image, size,
},
Attrs: attrs,
}
- return scope.AddOperation(opspec)
-}
-
-// FakeQuantWithMinMaxArgsAttr is an optional argument to FakeQuantWithMinMaxArgs.
-type FakeQuantWithMinMaxArgsAttr func(optionalAttr)
-
-// FakeQuantWithMinMaxArgsMin sets the optional min attribute to value.
-// If not specified, defaults to -6
-func FakeQuantWithMinMaxArgsMin(value float32) FakeQuantWithMinMaxArgsAttr {
- return func(m optionalAttr) {
- m["min"] = value
- }
-}
-
-// FakeQuantWithMinMaxArgsMax sets the optional max attribute to value.
-// If not specified, defaults to 6
-func FakeQuantWithMinMaxArgsMax(value float32) FakeQuantWithMinMaxArgsAttr {
- return func(m optionalAttr) {
- m["max"] = value
- }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// FakeQuantWithMinMaxArgsNumBits sets the optional num_bits attribute to value.
-// If not specified, defaults to 8
-func FakeQuantWithMinMaxArgsNumBits(value int64) FakeQuantWithMinMaxArgsAttr {
- return func(m optionalAttr) {
- m["num_bits"] = value
- }
-}
+// TopKV2Attr is an optional argument to TopKV2.
+type TopKV2Attr func(optionalAttr)
-// FakeQuantWithMinMaxArgsNarrowRange sets the optional narrow_range attribute to value.
-// If not specified, defaults to false
-func FakeQuantWithMinMaxArgsNarrowRange(value bool) FakeQuantWithMinMaxArgsAttr {
+// TopKV2Sorted sets the optional sorted attribute to value.
+//
+// value: If true, the resulting `k` elements will be sorted by the values in
+// descending order.
+// If not specified, defaults to true
+func TopKV2Sorted(value bool) TopKV2Attr {
return func(m optionalAttr) {
- m["narrow_range"] = value
+ m["sorted"] = value
}
}
-// Fake-quantize the 'inputs' tensor, type float to 'outputs' tensor of same type.
+// Finds values and indices of the `k` largest elements for the last dimension.
//
-// Attributes `[min; max]` define the clamping range for the `inputs` data.
-// `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`
-// when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and
-// then de-quantized and output as floats in `[min; max]` interval.
-// `num_bits` is the bitwidth of the quantization; between 2 and 8, inclusive.
+// If the input is a vector (rank-1), finds the `k` largest entries in the vector
+// and outputs their values and indices as vectors. Thus `values[j]` is the
+// `j`-th largest entry in `input`, and its index is `indices[j]`.
//
-// Quantization is called fake since the output is still in floating point.
-func FakeQuantWithMinMaxArgs(scope *Scope, inputs tf.Output, optional ...FakeQuantWithMinMaxArgsAttr) (outputs tf.Output) {
+// For matrices (resp. higher rank input), computes the top `k` entries in each
+// row (resp. vector along the last dimension). Thus,
+//
+// values.shape = indices.shape = input.shape[:-1] + [k]
+//
+// If two elements are equal, the lower-index element appears first.
+//
+// Arguments:
+// input: 1-D or higher with last dimension at least `k`.
+// k: 0-D. Number of top elements to look for along the last dimension (along each
+// row for matrices).
+//
+// Returns The `k` largest elements along each last dimensional slice. The indices of `values` within the last dimension of `input`.
+func TopKV2(scope *Scope, input tf.Output, k tf.Output, optional ...TopKV2Attr) (values tf.Output, indices tf.Output) {
if scope.Err() != nil {
return
}
@@ -3989,431 +3439,575 @@ func FakeQuantWithMinMaxArgs(scope *Scope, inputs tf.Output, optional ...FakeQua
a(attrs)
}
opspec := tf.OpSpec{
- Type: "FakeQuantWithMinMaxArgs",
+ Type: "TopKV2",
Input: []tf.Input{
- inputs,
+ input, k,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1)
}
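+
+// A minimal sketch (editorial addition): the two largest entries of a vector
+// and their indices.
+//
+// ```
+// s := op.NewScope()
+// x := op.Const(s, []float32{1, 7, 3, 5})
+// values, indices := op.TopKV2(s, x, op.Const(s, int32(2))) // [7, 5] and [1, 3]
+// ```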
-// Deprecated. Use TensorArraySizeV3
-func TensorArraySizeV2(scope *Scope, handle tf.Output, flow_in tf.Output) (size tf.Output) {
+// Returns x // y element-wise.
+//
+// *NOTE*: `FloorDiv` supports broadcasting. More about broadcasting
+// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+func FloorDiv(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "TensorArraySizeV2",
+ Type: "FloorDiv",
Input: []tf.Input{
- handle, flow_in,
+ x, y,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Deprecated. Use TensorArrayScatterV3
-func TensorArrayScatterV2(scope *Scope, handle tf.Output, indices tf.Output, value tf.Output, flow_in tf.Output) (flow_out tf.Output) {
+// Returns a batched diagonal tensor with given batched diagonal values.
+//
+// Given a `diagonal`, this operation returns a tensor with the `diagonal` and
+// everything else padded with zeros. The diagonal is computed as follows:
+//
+// Assume `diagonal` has `k` dimensions `[I, J, K, ..., N]`, then the output is a
+// tensor of rank `k+1` with dimensions `[I, J, K, ..., N, N]` where:
+//
+// `output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`.
+//
+// For example:
+//
+// ```
+// # 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]]
+//
+// and diagonal.shape = (2, 4)
+//
+// tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0]
+// [0, 2, 0, 0]
+// [0, 0, 3, 0]
+// [0, 0, 0, 4]],
+// [[5, 0, 0, 0]
+// [0, 6, 0, 0]
+// [0, 0, 7, 0]
+// [0, 0, 0, 8]]]
+//
+// which has shape (2, 4, 4)
+// ```
+//
+// Arguments:
+// diagonal: Rank `k`, where `k >= 1`.
+//
+// Returns Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`.
+func MatrixDiag(scope *Scope, diagonal tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "TensorArrayScatterV2",
+ Type: "MatrixDiag",
Input: []tf.Input{
- handle, indices, value, flow_in,
+ diagonal,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
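+
+// A minimal sketch (editorial addition) matching the example above: a (2, 4)
+// input yields a (2, 4, 4) batch of diagonal matrices.
+//
+// ```
+// s := op.NewScope()
+// d := op.Const(s, [][]float32{{1, 2, 3, 4}, {5, 6, 7, 8}})
+// m := op.MatrixDiag(s, d)
+// ```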
-// Deprecated. Use TensorArrayWriteV3
-func TensorArrayWriteV2(scope *Scope, handle tf.Output, index tf.Output, value tf.Output, flow_in tf.Output) (flow_out tf.Output) {
+// Says whether the targets are in the top `K` predictions.
+//
+// This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the
+// prediction for the target class is among the top `k` predictions among
+// all predictions for example `i`. Note that the behavior of `InTopK` differs
+// from the `TopK` op in its handling of ties; if multiple classes have the
+// same prediction value and straddle the top-`k` boundary, all of those
+// classes are considered to be in the top `k`.
+//
+// More formally, let
+//
+// \\(predictions_i\\) be the predictions for all classes for example `i`,
+// \\(targets_i\\) be the target class for example `i`,
+// \\(out_i\\) be the output for example `i`,
+//
+// $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
+//
+// Arguments:
+// predictions: A `batch_size` x `classes` tensor.
+// targets: A `batch_size` vector of class ids.
+// k: Number of top elements to look at for computing precision.
+//
+// Returns Computed Precision at `k` as a `bool Tensor`.
+func InTopK(scope *Scope, predictions tf.Output, targets tf.Output, k int64) (precision tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"k": k}
opspec := tf.OpSpec{
- Type: "TensorArrayWriteV2",
+ Type: "InTopK",
Input: []tf.Input{
- handle, index, value, flow_in,
+ predictions, targets,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Get the current size of the TensorArray.
+// Given a quantized tensor described by (input, input_min, input_max), outputs a
+//
+// range that covers the actual values present in that tensor. This op is
+// typically used to produce the requested_output_min and requested_output_max for
+// Requantize.
//
// Arguments:
-// handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
-// flow_in: A float scalar that enforces proper chaining of operations.
//
-// Returns The current size of the TensorArray.
-func TensorArraySizeV3(scope *Scope, handle tf.Output, flow_in tf.Output) (size tf.Output) {
+// input_min: The float value that the minimum quantized input value represents.
+// input_max: The float value that the maximum quantized input value represents.
+//
+// Returns The computed min output. The computed max output.
+func RequantizationRange(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output) (output_min tf.Output, output_max tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "TensorArraySizeV3",
+ Type: "RequantizationRange",
Input: []tf.Input{
- handle, flow_in,
+ input, input_min, input_max,
},
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1)
}
-// LearnedUnigramCandidateSamplerAttr is an optional argument to LearnedUnigramCandidateSampler.
-type LearnedUnigramCandidateSamplerAttr func(optionalAttr)
-
-// LearnedUnigramCandidateSamplerSeed sets the optional seed attribute to value.
+// Returns the truth value of (x <= y) element-wise.
//
-// value: If either seed or seed2 are set to be non-zero, the random number
-// generator is seeded by the given seed. Otherwise, it is seeded by a
-// random seed.
-// If not specified, defaults to 0
-func LearnedUnigramCandidateSamplerSeed(value int64) LearnedUnigramCandidateSamplerAttr {
- return func(m optionalAttr) {
- m["seed"] = value
+// *NOTE*: `LessEqual` supports broadcasting. More about broadcasting
+// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+func LessEqual(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
+ if scope.Err() != nil {
+ return
}
+ opspec := tf.OpSpec{
+ Type: "LessEqual",
+ Input: []tf.Input{
+ x, y,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// LearnedUnigramCandidateSamplerSeed2 sets the optional seed2 attribute to value.
+// Computes softmax activations.
//
-// value: A second seed to avoid seed collision.
+// For each batch `i` and class `j` we have
+//
+// softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))
+//
+// Arguments:
+// logits: 2-D with shape `[batch_size, num_classes]`.
+//
+// Returns Same shape as `logits`.
+func Softmax(scope *Scope, logits tf.Output) (softmax tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "Softmax",
+ Input: []tf.Input{
+ logits,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
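+
+// A minimal end-to-end sketch (editorial addition) showing the finalize/run
+// steps a caller would typically follow; error handling is elided.
+//
+// ```
+// s := op.NewScope()
+// sm := op.Softmax(s, op.Const(s, [][]float32{{1, 2, 3}}))
+// graph, _ := s.Finalize()
+// sess, _ := tf.NewSession(graph, nil)
+// out, _ := sess.Run(nil, []tf.Output{sm}, nil)
+// ```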
+
+// DecodeBmpAttr is an optional argument to DecodeBmp.
+type DecodeBmpAttr func(optionalAttr)
+
+// DecodeBmpChannels sets the optional channels attribute to value.
// If not specified, defaults to 0
-func LearnedUnigramCandidateSamplerSeed2(value int64) LearnedUnigramCandidateSamplerAttr {
+func DecodeBmpChannels(value int64) DecodeBmpAttr {
return func(m optionalAttr) {
- m["seed2"] = value
+ m["channels"] = value
}
}
-// Generates labels for candidate sampling with a learned unigram distribution.
+// Decode the first frame of a BMP-encoded image to a uint8 tensor.
//
-// See explanations of candidate sampling and the data formats at
-// go/candidate-sampling.
+// The attr `channels` indicates the desired number of color channels for the
+// decoded image.
//
-// For each batch, this op picks a single set of sampled candidate labels.
+// Accepted values are:
//
-// The advantages of sampling candidates per-batch are simplicity and the
-// possibility of efficient dense matrix multiplication. The disadvantage is that
-// the sampled candidates must be chosen independently of the context and of the
-// true labels.
+// * 0: Use the number of channels in the BMP-encoded image.
+// * 3: output an RGB image.
+// * 4: output an RGBA image.
//
// Arguments:
-// true_classes: A batch_size * num_true matrix, in which each row contains the
-// IDs of the num_true target_classes in the corresponding original label.
-// num_true: Number of true labels per context.
-// num_sampled: Number of candidates to randomly sample.
-// unique: If unique is true, we sample with rejection, so that all sampled
-// candidates in a batch are unique. This requires some approximation to
-// estimate the post-rejection sampling probabilities.
-// range_max: The sampler will sample integers from the interval [0, range_max).
+// contents: 0-D. The BMP-encoded image.
//
-// Returns A vector of length num_sampled, in which each element is
-// the ID of a sampled candidate. A batch_size * num_true matrix, representing
-// the number of times each candidate is expected to occur in a batch
-// of sampled candidates. If unique=true, then this is a probability. A vector
-// of length num_sampled, for each sampled candidate representing the number
-// of times the candidate is expected to occur in a batch of sampled
-// candidates. If unique=true, then this is a probability.
-func LearnedUnigramCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...LearnedUnigramCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
+// Returns 3-D with shape `[height, width, channels]`. RGB order.
+func DecodeBmp(scope *Scope, contents tf.Output, optional ...DecodeBmpAttr) (image tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique, "range_max": range_max}
+ attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "LearnedUnigramCandidateSampler",
+ Type: "DecodeBmp",
Input: []tf.Input{
- true_classes,
+ contents,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
+ return op.Output(0)
}
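+
+// A minimal usage sketch, assuming the standard tensorflow/go client packages;
+// "input.bmp" is a placeholder path:
+//
+// ```
+// s := op.NewScope()
+// contents := op.ReadFile(s, op.Const(s, "input.bmp"))
+// img := op.DecodeBmp(s, contents, op.DecodeBmpChannels(3))
+// // Finalize s into a *tf.Graph and fetch img with a tf.Session to decode.
+// ```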
-// Split the data from the input value into TensorArray elements.
+// Computes softsign gradients for a softsign operation.
//
-// Assuming that `lengths` takes on values
+// Arguments:
+// gradients: The backpropagated gradients to the corresponding softsign operation.
+// features: The features passed as input to the corresponding softsign operation.
//
-// ```(n0, n1, ..., n(T-1))```
+// Returns The gradients: `gradients / (1 + abs(features)) ** 2`.
+func SoftsignGrad(scope *Scope, gradients tf.Output, features tf.Output) (backprops tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "SoftsignGrad",
+ Input: []tf.Input{
+ gradients, features,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
+// BatchMatMulAttr is an optional argument to BatchMatMul.
+type BatchMatMulAttr func(optionalAttr)
+
+// BatchMatMulAdjX sets the optional adj_x attribute to value.
//
-// and that `value` has shape
+// value: If `True`, adjoint the slices of `x`. Defaults to `False`.
+// If not specified, defaults to false
+func BatchMatMulAdjX(value bool) BatchMatMulAttr {
+ return func(m optionalAttr) {
+ m["adj_x"] = value
+ }
+}
+
+// BatchMatMulAdjY sets the optional adj_y attribute to value.
//
-// ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```,
+// value: If `True`, adjoint the slices of `y`. Defaults to `False`.
+// If not specified, defaults to false
+func BatchMatMulAdjY(value bool) BatchMatMulAttr {
+ return func(m optionalAttr) {
+ m["adj_y"] = value
+ }
+}
+
+// Multiplies slices of two tensors in batches.
//
-// this splits values into a TensorArray with T tensors.
+// Multiplies all slices of `Tensor` `x` and `y` (each slice can be
+// viewed as an element of a batch), and arranges the individual results
+// in a single output tensor of the same batch size. Each of the
+// individual slices can optionally be adjointed (to adjoint a matrix
+// means to transpose and conjugate it) before multiplication by setting
+// the `adj_x` or `adj_y` flag to `True`, which are by default `False`.
//
-// TensorArray index t will be the subtensor of values with starting position
+// The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`
+// and `[..., r_y, c_y]`.
//
-// ```(n0 + n1 + ... + n(t-1), 0, 0, ...)```
+// The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:
//
-// and having size
+// r_o = c_x if adj_x else r_x
+// c_o = r_y if adj_y else c_y
//
-// ```nt x d0 x d1 x ...```
+// It is computed as:
+//
+// output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
//
// Arguments:
-// handle: The handle to a TensorArray.
-// value: The concatenated tensor to write to the TensorArray.
-// lengths: The vector of lengths, how to split the rows of value into the
-// TensorArray.
-// flow_in: A float scalar that enforces proper chaining of operations.
+// x: 2-D or higher with shape `[..., r_x, c_x]`.
+// y: 2-D or higher with shape `[..., r_y, c_y]`.
//
-// Returns A float scalar that enforces proper chaining of operations.
-func TensorArraySplitV3(scope *Scope, handle tf.Output, value tf.Output, lengths tf.Output, flow_in tf.Output) (flow_out tf.Output) {
+// Returns 3-D or higher with shape `[..., r_o, c_o]`
+func BatchMatMul(scope *Scope, x tf.Output, y tf.Output, optional ...BatchMatMulAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "TensorArraySplitV3",
+ Type: "BatchMatMul",
Input: []tf.Input{
- handle, value, lengths, flow_in,
+ x, y,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
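+
+// A minimal sketch of the shape arithmetic above, assuming the standard
+// tensorflow/go client packages: with `x` of shape [2, 3, 4] and `y` of shape
+// [2, 3, 5], setting adj_x adjoints each 3x4 slice of `x`, so the output has
+// shape [2, 4, 5] (r_o = c_x = 4, c_o = c_y = 5):
+//
+// ```
+// s := op.NewScope()
+// x := op.Placeholder(s.SubScope("x"), tf.Float)
+// y := op.Placeholder(s.SubScope("y"), tf.Float)
+// prod := op.BatchMatMul(s, x, y, op.BatchMatMulAdjX(true))
+// ```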
-// Returns a diagonal tensor with a given diagonal values.
+// Pads a tensor.
//
-// Given a `diagonal`, this operation returns a tensor with the `diagonal` and
-// everything else padded with zeros. The diagonal is computed as follows:
+// This operation pads `input` according to the `paddings` and `constant_values`
+// you specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is
+// the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
+// how many padding values to add before the contents of `input` in that dimension,
+// and `paddings[D, 1]` indicates how many padding values to add after the contents
+// of `input` in that dimension. `constant_values` is a scalar tensor of the same
+// type as `input` that indicates the value to use for padding `input`.
//
-// Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of
-// rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:
+// The padded size of each dimension D of the output is:
//
-// `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else.
+// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
//
// For example:
//
// ```
-// # 'diagonal' is [1, 2, 3, 4]
-// tf.diag(diagonal) ==> [[1, 0, 0, 0]
-// [0, 2, 0, 0]
-// [0, 0, 3, 0]
-// [0, 0, 0, 4]]
+// # 't' is [[1, 1], [2, 2]]
+// # 'paddings' is [[1, 1], [2, 2]]
+// # 'constant_values' is 0
+// # rank of 't' is 2
+// pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
+// [0, 0, 1, 1, 0, 0]
+// [0, 0, 2, 2, 0, 0]
+// [0, 0, 0, 0, 0, 0]]
// ```
-//
-// Arguments:
-// diagonal: Rank k tensor where k is at most 1.
-func Diag(scope *Scope, diagonal tf.Output) (output tf.Output) {
+func PadV2(scope *Scope, input tf.Output, paddings tf.Output, constant_values tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Diag",
+ Type: "PadV2",
Input: []tf.Input{
- diagonal,
+ input, paddings, constant_values,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
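+
+// A minimal sketch mirroring the example above, assuming the standard
+// tensorflow/go client packages:
+//
+// ```
+// s := op.NewScope()
+// t := op.Const(s.SubScope("t"), [][]int32{{1, 1}, {2, 2}})
+// paddings := op.Const(s.SubScope("p"), [][]int32{{1, 1}, {2, 2}})
+// zero := op.Const(s.SubScope("z"), int32(0))
+// padded := op.PadV2(s, t, paddings, zero) // evaluates to the 4x6 result above
+// ```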
-// TensorArrayConcatV3Attr is an optional argument to TensorArrayConcatV3.
-type TensorArrayConcatV3Attr func(optionalAttr)
-
-// TensorArrayConcatV3ElementShapeExcept0 sets the optional element_shape_except0 attribute to value.
+// Returns which elements of x are NaN.
//
-// value: The expected shape of an element, if known,
-// excluding the first dimension. Used to validate the shapes of
-// TensorArray elements. If this shape is not fully specified, concatenating
-// zero-size TensorArrays is an error.
-// If not specified, defaults to <unknown_rank:true >
-func TensorArrayConcatV3ElementShapeExcept0(value tf.Shape) TensorArrayConcatV3Attr {
- return func(m optionalAttr) {
- m["element_shape_except0"] = value
+// @compatibility(numpy)
+// Equivalent to np.isnan
+// @end_compatibility
+func IsNan(scope *Scope, x tf.Output) (y tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "IsNan",
+ Input: []tf.Input{
+ x,
+ },
}
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// Concat the elements from the TensorArray into value `value`.
+// FractionalAvgPoolGradAttr is an optional argument to FractionalAvgPoolGrad.
+type FractionalAvgPoolGradAttr func(optionalAttr)
+
+// FractionalAvgPoolGradOverlapping sets the optional overlapping attribute to value.
//
-// Takes `T` elements of shapes
+// value: When set to True, it means when pooling, the values at the boundary
+// of adjacent pooling cells are used by both cells. For example:
//
-// ```
-// (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...)
-// ```
+// `index 0 1 2 3 4`
//
-// and concatenates them into a Tensor of shape:
+// `value 20 5 16 3 7`
//
-// ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```
+// If the pooling sequence is [0, 2, 4], then 16, at index 2, will be used twice.
+// The result would be [41/3, 26/3] for fractional avg pooling.
+// If not specified, defaults to false
+func FractionalAvgPoolGradOverlapping(value bool) FractionalAvgPoolGradAttr {
+ return func(m optionalAttr) {
+ m["overlapping"] = value
+ }
+}
+
+// Computes gradient of the FractionalAvgPool function.
//
-// All elements must have the same shape (excepting the first dimension).
+// Unlike FractionalMaxPoolGrad, we don't need to find arg_max for
+// FractionalAvgPoolGrad, we just need to evenly back-propagate each element of
+// out_backprop to those indices that form the same pooling cell. Therefore, we
+// just need to know the shape of original input tensor, instead of the whole
+// tensor.
//
// Arguments:
-// handle: The handle to a TensorArray.
-// flow_in: A float scalar that enforces proper chaining of operations.
-// dtype: The type of the elem that is returned.
+// orig_input_tensor_shape: Original input tensor shape for `fractional_avg_pool`
+// out_backprop: 4-D with shape `[batch, height, width, channels]`. Gradients
+// w.r.t. the output of `fractional_avg_pool`.
+// row_pooling_sequence: row pooling sequence, form pooling region with
+// col_pooling_sequence.
+// col_pooling_sequence: column pooling sequence, form pooling region with
+// row_pooling sequence.
//
-// Returns All of the elements in the TensorArray, concatenated along the first
-// axis. A vector of the row sizes of the original T elements in the
-// value output. In the example above, this would be the values:
-// `(n1, n2, ..., n(T-1))`.
-func TensorArrayConcatV3(scope *Scope, handle tf.Output, flow_in tf.Output, dtype tf.DataType, optional ...TensorArrayConcatV3Attr) (value tf.Output, lengths tf.Output) {
+// Returns 4-D. Gradients w.r.t. the input of `fractional_avg_pool`.
+func FractionalAvgPoolGrad(scope *Scope, orig_input_tensor_shape tf.Output, out_backprop tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output, optional ...FractionalAvgPoolGradAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dtype": dtype}
+ attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "TensorArrayConcatV3",
+ Type: "FractionalAvgPoolGrad",
Input: []tf.Input{
- handle, flow_in,
+ orig_input_tensor_shape, out_backprop, row_pooling_sequence, col_pooling_sequence,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1)
+ return op.Output(0)
}
-// Scatter the data from the input value into specific TensorArray elements.
-//
-// `indices` must be a vector, its length must match the first dim of `value`.
+// Computes gradients for the exponential linear (Elu) operation.
//
// Arguments:
-// handle: The handle to a TensorArray.
-// indices: The locations at which to write the tensor elements.
-// value: The concatenated tensor to write to the TensorArray.
-// flow_in: A float scalar that enforces proper chaining of operations.
+// gradients: The backpropagated gradients to the corresponding Elu operation.
+// outputs: The outputs of the corresponding Elu operation.
//
-// Returns A float scalar that enforces proper chaining of operations.
-func TensorArrayScatterV3(scope *Scope, handle tf.Output, indices tf.Output, value tf.Output, flow_in tf.Output) (flow_out tf.Output) {
+// Returns The gradients: `gradients * (outputs + 1)` if outputs < 0,
+// `gradients` otherwise.
+func EluGrad(scope *Scope, gradients tf.Output, outputs tf.Output) (backprops tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "TensorArrayScatterV3",
+ Type: "EluGrad",
Input: []tf.Input{
- handle, indices, value, flow_in,
+ gradients, outputs,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Push an element onto the tensor_array.
+// Converts each string in the input Tensor to its hash mod by a number of buckets.
+//
+// The hash function is deterministic on the content of the string within the
+// process.
+//
+// Note that the hash function may change from time to time.
+// This functionality will be deprecated and it's recommended to use
+// `tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`.
//
// Arguments:
-// handle: The handle to a TensorArray.
-// index: The position to write to inside the TensorArray.
-// value: The tensor to write to the TensorArray.
-// flow_in: A float scalar that enforces proper chaining of operations.
//
-// Returns A float scalar that enforces proper chaining of operations.
-func TensorArrayWriteV3(scope *Scope, handle tf.Output, index tf.Output, value tf.Output, flow_in tf.Output) (flow_out tf.Output) {
+// num_buckets: The number of buckets.
+//
+// Returns A Tensor of the same shape as the input `string_tensor`.
+func StringToHashBucket(scope *Scope, string_tensor tf.Output, num_buckets int64) (output tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"num_buckets": num_buckets}
opspec := tf.OpSpec{
- Type: "TensorArrayWriteV3",
+ Type: "StringToHashBucket",
Input: []tf.Input{
- handle, index, value, flow_in,
+ string_tensor,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
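+
+// A minimal usage sketch, assuming the standard tensorflow/go client packages:
+//
+// ```
+// s := op.NewScope()
+// strs := op.Const(s, []string{"apple", "banana"})
+// buckets := op.StringToHashBucket(s, strs, 1000)
+// // buckets is an int64 vector of hash-bucket indices in [0, 1000).
+// ```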
-// Creates a TensorArray for storing the gradients of values in the given handle.
-//
-// If the given TensorArray gradient already exists, returns a reference to it.
-//
-// Locks the size of the original TensorArray by disabling its dynamic size flag.
-//
-// **A note about the input flow_in:**
-//
-// The handle flow_in forces the execution of the gradient lookup to occur
-// only after certain other operations have occurred. For example, when
-// the forward TensorArray is dynamically sized, writes to this TensorArray
-// may resize the object. The gradient TensorArray is statically sized based
-// on the size of the forward TensorArray when this operation executes.
-// Furthermore, the size of the forward TensorArray is frozen by this call.
-// As a result, the flow is used to ensure that the call to generate the gradient
-// TensorArray only happens after all writes are executed.
-//
-// In the case of dynamically sized TensorArrays, gradient computation should
-// only be performed on read operations that have themselves been chained via
-// flow to occur only after all writes have executed. That way the final size
-// of the forward TensorArray is known when this operation is called.
-//
-// **A note about the source attribute:**
+// Creates a dataset that contains `count` elements from the `input_dataset`.
//
-// TensorArray gradient calls use an accumulator TensorArray object. If
-// multiple gradients are calculated and run in the same session, the multiple
-// gradient nodes may accidentally flow through the same accumulator TensorArray.
-// This double counts and generally breaks the TensorArray gradient flow.
+// Arguments:
//
-// The solution is to identify which gradient call this particular
-// TensorArray gradient is being called in. This is performed by identifying
-// a unique string (e.g. "gradients", "gradients_1", ...) from the input
-// gradient Tensor's name. This string is used as a suffix when creating
-// the TensorArray gradient object here (the attribute `source`).
+// count: A scalar representing the number of elements from the `input_dataset`
+// that should be taken. A value of `-1` indicates that all of `input_dataset`
+// is taken.
//
-// The attribute `source` is added as a suffix to the forward TensorArray's
-// name when performing the creation / lookup, so that each separate gradient
-// calculation gets its own TensorArray accumulator.
//
-// Arguments:
-// handle: The handle to the forward TensorArray.
-// flow_in: A float scalar that enforces proper chaining of operations.
-// source: The gradient source string, used to decide which gradient TensorArray
-// to return.
-func TensorArrayGradV3(scope *Scope, handle tf.Output, flow_in tf.Output, source string) (grad_handle tf.Output, flow_out tf.Output) {
+func TakeDataset(scope *Scope, input_dataset tf.Output, count tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"source": source}
+ attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
opspec := tf.OpSpec{
- Type: "TensorArrayGradV3",
+ Type: "TakeDataset",
Input: []tf.Input{
- handle, flow_in,
+ input_dataset, count,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1)
+ return op.Output(0)
}
-// StackPushV2Attr is an optional argument to StackPushV2.
-type StackPushV2Attr func(optionalAttr)
+// Computes rectified linear 6: `min(max(features, 0), 6)`.
+func Relu6(scope *Scope, features tf.Output) (activations tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "Relu6",
+ Input: []tf.Input{
+ features,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
-// StackPushV2SwapMemory sets the optional swap_memory attribute to value.
+// Computes rectified linear gradients for a Relu operation.
//
-// value: Swap `elem` to CPU. Default to false.
-// If not specified, defaults to false
-func StackPushV2SwapMemory(value bool) StackPushV2Attr {
- return func(m optionalAttr) {
- m["swap_memory"] = value
+// Arguments:
+// gradients: The backpropagated gradients to the corresponding Relu operation.
+// features: The features passed as input to the corresponding Relu operation, OR
+// the outputs of that operation (both work equivalently).
+//
+// Returns `gradients * (features > 0)`.
+func ReluGrad(scope *Scope, gradients tf.Output, features tf.Output) (backprops tf.Output) {
+ if scope.Err() != nil {
+ return
}
+ opspec := tf.OpSpec{
+ Type: "ReluGrad",
+ Input: []tf.Input{
+ gradients, features,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// Push an element onto the stack.
+// Computes the gradient of morphological 2-D dilation with respect to the input.
//
// Arguments:
-// handle: The handle to a stack.
-// elem: The tensor to be pushed onto the stack.
+// input: 4-D with shape `[batch, in_height, in_width, depth]`.
+// filter: 3-D with shape `[filter_height, filter_width, depth]`.
+// out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`.
+// strides: 1-D of length 4. The stride of the sliding window for each dimension of
+// the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
+// rates: 1-D of length 4. The input stride for atrous morphological dilation.
+// Must be: `[1, rate_height, rate_width, 1]`.
+// padding: The type of padding algorithm to use.
//
-// Returns The same tensor as the input 'elem'.
-func StackPushV2(scope *Scope, handle tf.Output, elem tf.Output, optional ...StackPushV2Attr) (output tf.Output) {
+// Returns 4-D with shape `[batch, in_height, in_width, depth]`.
+func Dilation2DBackpropInput(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, rates []int64, padding string) (in_backprop tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
+ attrs := map[string]interface{}{"strides": strides, "rates": rates, "padding": padding}
opspec := tf.OpSpec{
- Type: "StackPushV2",
+ Type: "Dilation2DBackpropInput",
Input: []tf.Input{
- handle, elem,
+ input, filter, out_backprop,
},
Attrs: attrs,
}
@@ -4421,764 +4015,648 @@ func StackPushV2(scope *Scope, handle tf.Output, elem tf.Output, optional ...Sta
return op.Output(0)
}
-// StackV2Attr is an optional argument to StackV2.
-type StackV2Attr func(optionalAttr)
+// CTCBeamSearchDecoderAttr is an optional argument to CTCBeamSearchDecoder.
+type CTCBeamSearchDecoderAttr func(optionalAttr)
-// StackV2StackName sets the optional stack_name attribute to value.
+// CTCBeamSearchDecoderMergeRepeated sets the optional merge_repeated attribute to value.
//
-// value: Overrides the name used for the temporary stack resource. Default
-// value is the name of the 'Stack' op (which is guaranteed unique).
-// If not specified, defaults to ""
-func StackV2StackName(value string) StackV2Attr {
+// value: If true, merge repeated classes in output.
+// If not specified, defaults to true
+func CTCBeamSearchDecoderMergeRepeated(value bool) CTCBeamSearchDecoderAttr {
return func(m optionalAttr) {
- m["stack_name"] = value
+ m["merge_repeated"] = value
}
}
-// A stack that produces elements in first-in last-out order.
+// Performs beam search decoding on the logits given in input.
+//
+// A note about the attribute merge_repeated: For the beam search decoder,
+// this means that if consecutive entries in a beam are the same, only
+// the first of these is emitted. That is, when the top path is "A B B B B",
+// "A B" is returned if merge_repeated = True but "A B B B B" is
+// returned if merge_repeated = False.
//
// Arguments:
-// max_size: The maximum size of the stack if non-negative. If negative, the stack
-// size is unlimited.
-// elem_type: The type of the elements on the stack.
+// inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
+// sequence_length: A vector containing sequence lengths, size `(batch)`.
+// beam_width: A scalar >= 0 (beam search beam width).
+// top_paths: A scalar >= 0, <= beam_width (controls output size).
//
-// Returns The handle to the stack.
-func StackV2(scope *Scope, max_size tf.Output, elem_type tf.DataType, optional ...StackV2Attr) (handle tf.Output) {
+// Returns A list (length: top_paths) of indices matrices. Matrix j,
+// size `(total_decoded_outputs[j] x 2)`, has indices of a
+// `SparseTensor<int64, 2>`. The rows store: [batch, time]. A list
+// (length: top_paths) of values vectors. Vector j,
+// size `(length total_decoded_outputs[j])`, has the values of a
+// `SparseTensor<int64, 2>`. The vector stores the decoded classes for beam j.
+// A list (length: top_paths) of shape vectors. Vector j, size `(2)`, stores
+// the shape of the decoded `SparseTensor[j]`. Its values are:
+// `[batch_size, max_decoded_length[j]]`. A matrix, shaped:
+// `(batch_size x top_paths)`. The sequence log-probabilities.
+func CTCBeamSearchDecoder(scope *Scope, inputs tf.Output, sequence_length tf.Output, beam_width int64, top_paths int64, optional ...CTCBeamSearchDecoderAttr) (decoded_indices []tf.Output, decoded_values []tf.Output, decoded_shape []tf.Output, log_probability tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"elem_type": elem_type}
+ attrs := map[string]interface{}{"beam_width": beam_width, "top_paths": top_paths}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "StackV2",
+ Type: "CTCBeamSearchDecoder",
Input: []tf.Input{
- max_size,
+ inputs, sequence_length,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ if scope.Err() != nil {
+ return
+ }
+ var idx int
+ var err error
+ if decoded_indices, idx, err = makeOutputList(op, idx, "decoded_indices"); err != nil {
+ scope.UpdateErr("CTCBeamSearchDecoder", err)
+ return
+ }
+ if decoded_values, idx, err = makeOutputList(op, idx, "decoded_values"); err != nil {
+ scope.UpdateErr("CTCBeamSearchDecoder", err)
+ return
+ }
+ if decoded_shape, idx, err = makeOutputList(op, idx, "decoded_shape"); err != nil {
+ scope.UpdateErr("CTCBeamSearchDecoder", err)
+ return
+ }
+ log_probability = op.Output(idx)
+ return decoded_indices, decoded_values, decoded_shape, log_probability
}
-// Returns the batched diagonal part of a batched tensor.
-//
-// This operation returns a tensor with the `diagonal` part
-// of the batched `input`. The `diagonal` part is computed as follows:
-//
-// Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
-// tensor of rank `k - 1` with dimensions `[I, J, K, ..., min(M, N)]` where:
-//
-// `diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`.
+// AudioSpectrogramAttr is an optional argument to AudioSpectrogram.
+type AudioSpectrogramAttr func(optionalAttr)
+
+// AudioSpectrogramMagnitudeSquared sets the optional magnitude_squared attribute to value.
//
-// The input must be at least a matrix.
+// value: Whether to return the squared magnitude or just the
+// magnitude. Using squared magnitude can avoid extra calculations.
+// If not specified, defaults to false
+func AudioSpectrogramMagnitudeSquared(value bool) AudioSpectrogramAttr {
+ return func(m optionalAttr) {
+ m["magnitude_squared"] = value
+ }
+}
+
+// Produces a visualization of audio data over time.
//
-// For example:
+// Spectrograms are a standard way of representing audio information as a series of
+// slices of frequency information, one slice for each window of time. By joining
+// these together into a sequence, they form a distinctive fingerprint of the sound
+// over time.
//
-// ```
-// # 'input' is [[[1, 0, 0, 0]
-// [0, 2, 0, 0]
-// [0, 0, 3, 0]
-// [0, 0, 0, 4]],
-// [[5, 0, 0, 0]
-// [0, 6, 0, 0]
-// [0, 0, 7, 0]
-// [0, 0, 0, 8]]]
+// This op expects to receive audio data as an input, stored as floats in the range
+// -1 to 1, together with a window width in samples, and a stride specifying how
+// far to move the window between slices. From this it generates a three
+// dimensional output. The lowest dimension has an amplitude value for each
+// frequency during that time slice. The next dimension is time, with successive
+// frequency slices. The final dimension is for the channels in the input, so a
+// stereo audio input would have two here for example.
//
-// and input.shape = (2, 4, 4)
+// This means the layout when converted and saved as an image is rotated 90 degrees
+// clockwise from a typical spectrogram. Time is descending down the Y axis, and
+// the frequency decreases from left to right.
//
-// tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]]
+// Each value in the result represents the square root of the sum of the squares
+// of the real and imaginary parts of an FFT on the current window of samples.
+// In this way, the
+// lowest dimension represents the power of each frequency in the current window,
+// and adjacent windows are concatenated in the next dimension.
//
-// which has shape (2, 4)
-// ```
+// To get a more intuitive and visual look at what this operation does, you can run
+// tensorflow/examples/wav_to_spectrogram to read in an audio file and save out the
+// resulting spectrogram as a PNG image.
//
// Arguments:
-// input: Rank `k` tensor where `k >= 2`.
+// input: Float representation of audio data.
+// window_size: How wide the input window is in samples. For the highest efficiency
+// this should be a power of two, but other values are accepted.
+// stride: How widely apart the center of adjacent sample windows should be.
//
-// Returns The extracted diagonal(s) having shape
-// `diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]`.
-func MatrixDiagPart(scope *Scope, input tf.Output) (diagonal tf.Output) {
+// Returns 3D representation of the audio frequencies as an image.
+func AudioSpectrogram(scope *Scope, input tf.Output, window_size int64, stride int64, optional ...AudioSpectrogramAttr) (spectrogram tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"window_size": window_size, "stride": stride}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "MatrixDiagPart",
+ Type: "AudioSpectrogram",
Input: []tf.Input{
input,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
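+
+// A minimal usage sketch, assuming the standard tensorflow/go client packages
+// and a waveform already decoded to float32 values in [-1, 1]; the window and
+// stride sizes are illustrative:
+//
+// ```
+// s := op.NewScope()
+// samples := op.Placeholder(s, tf.Float) // shape [length, channels]
+// spec := op.AudioSpectrogram(s, samples, 1024, 512,
+//   op.AudioSpectrogramMagnitudeSquared(true))
+// ```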
-// Returns true if queue is closed.
+// Computes the polygamma function \\(\psi^{(n)}(x)\\).
//
-// This operation returns true if the queue is closed and false if the queue
-// is open.
+// The polygamma function is defined as:
//
-// Arguments:
-// handle: The handle to a queue.
-func QueueIsClosedV2(scope *Scope, handle tf.Output) (is_closed tf.Output) {
+//
+// \\(\psi^{(n)}(x) = \frac{d^n}{dx^n} \psi(x)\\)
+//
+// where \\(\psi(x)\\) is the digamma function.
+func Polygamma(scope *Scope, a tf.Output, x tf.Output) (z tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "QueueIsClosedV2",
+ Type: "Polygamma",
Input: []tf.Input{
- handle,
+ a, x,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// QueueCloseV2Attr is an optional argument to QueueCloseV2.
-type QueueCloseV2Attr func(optionalAttr)
-
-// QueueCloseV2CancelPendingEnqueues sets the optional cancel_pending_enqueues attribute to value.
-//
-// value: If true, all pending enqueue requests that are
-// blocked on the given queue will be canceled.
-// If not specified, defaults to false
-func QueueCloseV2CancelPendingEnqueues(value bool) QueueCloseV2Attr {
- return func(m optionalAttr) {
- m["cancel_pending_enqueues"] = value
- }
-}
-
-// Closes the given queue.
-//
-// This operation signals that no more elements will be enqueued in the
-// given queue. Subsequent Enqueue(Many) operations will fail.
-// Subsequent Dequeue(Many) operations will continue to succeed if
-// sufficient elements remain in the queue. Subsequent Dequeue(Many)
-// operations that would block will fail immediately.
+// Computes second-order gradients of the maxpooling function.
//
// Arguments:
-// handle: The handle to a queue.
+// input: The original input.
+// grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the
+// input of `max_pool`.
+// argmax: The indices of the maximum values chosen for each output of `max_pool`.
+// ksize: The size of the window for each dimension of the input tensor.
+// strides: The stride of the sliding window for each dimension of the
+// input tensor.
+// padding: The type of padding algorithm to use.
//
-// Returns the created operation.
-func QueueCloseV2(scope *Scope, handle tf.Output, optional ...QueueCloseV2Attr) (o *tf.Operation) {
+// Returns Gradients of gradients w.r.t. the input of `max_pool`.
+func MaxPoolGradGradWithArgmax(scope *Scope, input tf.Output, grad tf.Output, argmax tf.Output, ksize []int64, strides []int64, padding string) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
+ attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
opspec := tf.OpSpec{
- Type: "QueueCloseV2",
+ Type: "MaxPoolGradGradWithArgmax",
Input: []tf.Input{
- handle,
+ input, grad, argmax,
},
Attrs: attrs,
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// QueueDequeueUpToV2Attr is an optional argument to QueueDequeueUpToV2.
-type QueueDequeueUpToV2Attr func(optionalAttr)
+// MaxPoolGradGradV2Attr is an optional argument to MaxPoolGradGradV2.
+type MaxPoolGradGradV2Attr func(optionalAttr)
-// QueueDequeueUpToV2TimeoutMs sets the optional timeout_ms attribute to value.
+// MaxPoolGradGradV2DataFormat sets the optional data_format attribute to value.
//
-// value: If the queue has fewer than n elements, this operation
-// will block for up to timeout_ms milliseconds.
-// Note: This option is not supported yet.
-// If not specified, defaults to -1
-func QueueDequeueUpToV2TimeoutMs(value int64) QueueDequeueUpToV2Attr {
+// value: Specify the data format of the input and output data. With the
+// default format "NHWC", the data is stored in the order of:
+// [batch, in_height, in_width, in_channels].
+// Alternatively, the format could be "NCHW", the data storage order of:
+// [batch, in_channels, in_height, in_width].
+// If not specified, defaults to "NHWC"
+func MaxPoolGradGradV2DataFormat(value string) MaxPoolGradGradV2Attr {
return func(m optionalAttr) {
- m["timeout_ms"] = value
+ m["data_format"] = value
}
}
-// Dequeues `n` tuples of one or more tensors from the given queue.
-//
-// This operation is not supported by all queues. If a queue does not support
-// DequeueUpTo, then an Unimplemented error is returned.
-//
-// If the queue is closed and there are more than 0 but less than `n`
-// elements remaining, then instead of returning an OutOfRange error like
-// QueueDequeueMany, less than `n` elements are returned immediately. If
-// the queue is closed and there are 0 elements left in the queue, then
-// an OutOfRange error is returned just like in QueueDequeueMany.
-// Otherwise the behavior is identical to QueueDequeueMany:
-//
-// This operation concatenates queue-element component tensors along the
-// 0th dimension to make a single component tensor. All of the components
-// in the dequeued tuple will have size n in the 0th dimension.
-//
-// This operation has `k` outputs, where `k` is the number of components in
-// the tuples stored in the given queue, and output `i` is the ith
-// component of the dequeued tuple.
+// Computes second-order gradients of the maxpooling function.
//
// Arguments:
-// handle: The handle to a queue.
-// n: The number of tuples to dequeue.
-// component_types: The type of each component in a tuple.
+// orig_input: The original input tensor.
+// orig_output: The original output tensor.
+// grad: 4-D. Gradients of gradients w.r.t. the input of `max_pool`.
+// ksize: The size of the window for each dimension of the input tensor.
+// strides: The stride of the sliding window for each dimension of the
+// input tensor.
+// padding: The type of padding algorithm to use.
//
-// Returns One or more tensors that were dequeued as a tuple.
-func QueueDequeueUpToV2(scope *Scope, handle tf.Output, n tf.Output, component_types []tf.DataType, optional ...QueueDequeueUpToV2Attr) (components []tf.Output) {
+// Returns Gradients of gradients w.r.t. the input to `max_pool`.
+func MaxPoolGradGradV2(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize tf.Output, strides tf.Output, padding string, optional ...MaxPoolGradGradV2Attr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"component_types": component_types}
+ attrs := map[string]interface{}{"padding": padding}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "QueueDequeueUpToV2",
+ Type: "MaxPoolGradGradV2",
Input: []tf.Input{
- handle, n,
+ orig_input, orig_output, grad, ksize, strides,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- if scope.Err() != nil {
- return
- }
- var idx int
- var err error
- if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
- scope.UpdateErr("QueueDequeueUpToV2", err)
- return
- }
- return components
+ return op.Output(0)
}
-// Deprecated. Use TensorArrayCloseV3
+// Fast Fourier transform.
//
-// Returns the created operation.
-func TensorArrayCloseV2(scope *Scope, handle tf.Output) (o *tf.Operation) {
+// Computes the 1-dimensional discrete Fourier transform over the inner-most
+// dimension of `input`.
+//
+// Arguments:
+// input: A complex64 tensor.
+//
+// Returns A complex64 tensor of the same shape as `input`. The inner-most
+// dimension of `input` is replaced with its 1D Fourier transform.
+//
+// @compatibility(numpy)
+// Equivalent to np.fft.fft
+// @end_compatibility
+func FFT(scope *Scope, input tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "TensorArrayCloseV2",
+ Type: "FFT",
Input: []tf.Input{
- handle,
+ input,
},
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// QueueDequeueManyV2Attr is an optional argument to QueueDequeueManyV2.
-type QueueDequeueManyV2Attr func(optionalAttr)
+// MaxPoolAttr is an optional argument to MaxPool.
+type MaxPoolAttr func(optionalAttr)
-// QueueDequeueManyV2TimeoutMs sets the optional timeout_ms attribute to value.
+// MaxPoolDataFormat sets the optional data_format attribute to value.
//
-// value: If the queue has fewer than n elements, this operation
-// will block for up to timeout_ms milliseconds.
-// Note: This option is not supported yet.
-// If not specified, defaults to -1
-func QueueDequeueManyV2TimeoutMs(value int64) QueueDequeueManyV2Attr {
+// value: Specify the data format of the input and output data. With the
+// default format "NHWC", the data is stored in the order of:
+// [batch, in_height, in_width, in_channels].
+// Alternatively, the format could be "NCHW", the data storage order of:
+// [batch, in_channels, in_height, in_width].
+// If not specified, defaults to "NHWC"
+func MaxPoolDataFormat(value string) MaxPoolAttr {
return func(m optionalAttr) {
- m["timeout_ms"] = value
+ m["data_format"] = value
}
}
-// Dequeues `n` tuples of one or more tensors from the given queue.
-//
-// If the queue is closed and there are fewer than `n` elements, then an
-// OutOfRange error is returned.
-//
-// This operation concatenates queue-element component tensors along the
-// 0th dimension to make a single component tensor. All of the components
-// in the dequeued tuple will have size `n` in the 0th dimension.
-//
-// This operation has `k` outputs, where `k` is the number of components in
-// the tuples stored in the given queue, and output `i` is the ith
-// component of the dequeued tuple.
-//
-// N.B. If the queue is empty, this operation will block until `n` elements
-// have been dequeued (or 'timeout_ms' elapses, if specified).
+// Performs max pooling on the input.
//
// Arguments:
-// handle: The handle to a queue.
-// n: The number of tuples to dequeue.
-// component_types: The type of each component in a tuple.
+// input: 4-D input to pool over.
+// ksize: The size of the window for each dimension of the input tensor.
+// strides: The stride of the sliding window for each dimension of the
+// input tensor.
+// padding: The type of padding algorithm to use.
//
-// Returns One or more tensors that were dequeued as a tuple.
-func QueueDequeueManyV2(scope *Scope, handle tf.Output, n tf.Output, component_types []tf.DataType, optional ...QueueDequeueManyV2Attr) (components []tf.Output) {
+// Returns The max pooled output tensor.
+func MaxPool(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"component_types": component_types}
+ attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "QueueDequeueManyV2",
+ Type: "MaxPool",
Input: []tf.Input{
- handle, n,
+ input,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- if scope.Err() != nil {
- return
- }
- var idx int
- var err error
- if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
- scope.UpdateErr("QueueDequeueManyV2", err)
- return
- }
- return components
-}
-
-// QueueEnqueueV2Attr is an optional argument to QueueEnqueueV2.
-type QueueEnqueueV2Attr func(optionalAttr)
-
-// QueueEnqueueV2TimeoutMs sets the optional timeout_ms attribute to value.
-//
-// value: If the queue is full, this operation will block for up to
-// timeout_ms milliseconds.
-// Note: This option is not supported yet.
-// If not specified, defaults to -1
-func QueueEnqueueV2TimeoutMs(value int64) QueueEnqueueV2Attr {
- return func(m optionalAttr) {
- m["timeout_ms"] = value
- }
+ return op.Output(0)
}
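+
+// A minimal usage sketch, assuming the standard tensorflow/go client packages:
+// 2x2 max pooling with stride 2 over an NHWC input:
+//
+// ```
+// s := op.NewScope()
+// images := op.Placeholder(s, tf.Float) // shape [batch, height, width, channels]
+// pooled := op.MaxPool(s, images, []int64{1, 2, 2, 1}, []int64{1, 2, 2, 1}, "VALID")
+// ```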
-// Enqueues a tuple of one or more tensors in the given queue.
+// Bucketizes 'input' based on 'boundaries'.
//
-// The components input has k elements, which correspond to the components of
-// tuples stored in the given queue.
+// For example, if the inputs are
+// boundaries = [0, 10, 100]
+// input = [[-5, 10000]
+// [150, 10]
+// [5, 100]]
//
-// N.B. If the queue is full, this operation will block until the given
-// element has been enqueued (or 'timeout_ms' elapses, if specified).
+// then the output will be
+// output = [[0, 3]
+// [3, 2]
+// [1, 3]]
//
// Arguments:
-// handle: The handle to a queue.
-// components: One or more tensors from which the enqueued tensors should be taken.
+// input: A Tensor of any shape containing int or float values.
+// boundaries: A sorted list of floats giving the boundaries of the buckets.
//
-// Returns the created operation.
-func QueueEnqueueV2(scope *Scope, handle tf.Output, components []tf.Output, optional ...QueueEnqueueV2Attr) (o *tf.Operation) {
+// Returns Same shape as 'input', with each value of input replaced by its bucket index.
+//
+// @compatibility(numpy)
+// Equivalent to np.digitize.
+// @end_compatibility
+func Bucketize(scope *Scope, input tf.Output, boundaries []float32) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
+ attrs := map[string]interface{}{"boundaries": boundaries}
opspec := tf.OpSpec{
- Type: "QueueEnqueueV2",
+ Type: "Bucketize",
Input: []tf.Input{
- handle, tf.OutputList(components),
+ input,
},
Attrs: attrs,
}
- return scope.AddOperation(opspec)
-}
-
-// ResourceStridedSliceAssignAttr is an optional argument to ResourceStridedSliceAssign.
-type ResourceStridedSliceAssignAttr func(optionalAttr)
-
-// ResourceStridedSliceAssignBeginMask sets the optional begin_mask attribute to value.
-// If not specified, defaults to 0
-func ResourceStridedSliceAssignBeginMask(value int64) ResourceStridedSliceAssignAttr {
- return func(m optionalAttr) {
- m["begin_mask"] = value
- }
-}
-
-// ResourceStridedSliceAssignEndMask sets the optional end_mask attribute to value.
-// If not specified, defaults to 0
-func ResourceStridedSliceAssignEndMask(value int64) ResourceStridedSliceAssignAttr {
- return func(m optionalAttr) {
- m["end_mask"] = value
- }
-}
-
-// ResourceStridedSliceAssignEllipsisMask sets the optional ellipsis_mask attribute to value.
-// If not specified, defaults to 0
-func ResourceStridedSliceAssignEllipsisMask(value int64) ResourceStridedSliceAssignAttr {
- return func(m optionalAttr) {
- m["ellipsis_mask"] = value
- }
-}
-
-// ResourceStridedSliceAssignNewAxisMask sets the optional new_axis_mask attribute to value.
-// If not specified, defaults to 0
-func ResourceStridedSliceAssignNewAxisMask(value int64) ResourceStridedSliceAssignAttr {
- return func(m optionalAttr) {
- m["new_axis_mask"] = value
- }
-}
-
-// ResourceStridedSliceAssignShrinkAxisMask sets the optional shrink_axis_mask attribute to value.
-// If not specified, defaults to 0
-func ResourceStridedSliceAssignShrinkAxisMask(value int64) ResourceStridedSliceAssignAttr {
- return func(m optionalAttr) {
- m["shrink_axis_mask"] = value
- }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
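+
+// A minimal sketch mirroring the example above, assuming the standard
+// tensorflow/go client packages:
+//
+// ```
+// s := op.NewScope()
+// in := op.Const(s, [][]int32{{-5, 10000}, {150, 10}, {5, 100}})
+// out := op.Bucketize(s, in, []float32{0, 10, 100})
+// // out evaluates to [[0, 3], [3, 2], [1, 3]].
+// ```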
-// Assign `value` to the sliced l-value reference of `ref`.
-//
-// The values of `value` are assigned to the positions in the variable
-// `ref` that are selected by the slice parameters. The slice parameters
-// `begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`.
+// Computes gradients of the maxpooling function.
//
-// NOTE this op currently does not support broadcasting and so `value`'s
-// shape must be exactly the shape produced by the slice of `ref`.
+// Arguments:
+// input: The original input.
+// grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the
+// output of `max_pool`.
+// argmax: The indices of the maximum values chosen for each output of `max_pool`.
+// ksize: The size of the window for each dimension of the input tensor.
+// strides: The stride of the sliding window for each dimension of the
+// input tensor.
+// padding: The type of padding algorithm to use.
//
-// Returns the created operation.
-func ResourceStridedSliceAssign(scope *Scope, ref tf.Output, begin tf.Output, end tf.Output, strides tf.Output, value tf.Output, optional ...ResourceStridedSliceAssignAttr) (o *tf.Operation) {
+// Returns Gradients w.r.t. the input of `max_pool`.
+func MaxPoolGradWithArgmax(scope *Scope, input tf.Output, grad tf.Output, argmax tf.Output, ksize []int64, strides []int64, padding string) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
+ attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
opspec := tf.OpSpec{
- Type: "ResourceStridedSliceAssign",
+ Type: "MaxPoolGradWithArgmax",
Input: []tf.Input{
- ref, begin, end, strides, value,
+ input, grad, argmax,
},
Attrs: attrs,
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// UnstageAttr is an optional argument to Unstage.
-type UnstageAttr func(optionalAttr)
-
-// UnstageCapacity sets the optional capacity attribute to value.
-// If not specified, defaults to 0
-//
-// REQUIRES: value >= 0
-func UnstageCapacity(value int64) UnstageAttr {
- return func(m optionalAttr) {
- m["capacity"] = value
- }
-}
+// CriticalSectionOpAttr is an optional argument to CriticalSectionOp.
+type CriticalSectionOpAttr func(optionalAttr)
-// UnstageMemoryLimit sets the optional memory_limit attribute to value.
-// If not specified, defaults to 0
+// CriticalSectionOpContainer sets the optional container attribute to value.
//
-// REQUIRES: value >= 0
-func UnstageMemoryLimit(value int64) UnstageAttr {
- return func(m optionalAttr) {
- m["memory_limit"] = value
- }
-}
-
-// UnstageContainer sets the optional container attribute to value.
+// value: the container this critical section is placed in.
// If not specified, defaults to ""
-func UnstageContainer(value string) UnstageAttr {
+func CriticalSectionOpContainer(value string) CriticalSectionOpAttr {
return func(m optionalAttr) {
m["container"] = value
}
}
-// UnstageSharedName sets the optional shared_name attribute to value.
+// CriticalSectionOpSharedName sets the optional shared_name attribute to value.
+//
+// value: the name by which this critical section is referred to.
// If not specified, defaults to ""
-func UnstageSharedName(value string) UnstageAttr {
+func CriticalSectionOpSharedName(value string) CriticalSectionOpAttr {
return func(m optionalAttr) {
m["shared_name"] = value
}
}
-// Op is similar to a lightweight Dequeue.
-//
-// The basic functionality is similar to dequeue with many fewer
-// capabilities and options. This Op is optimized for performance.
-func Unstage(scope *Scope, dtypes []tf.DataType, optional ...UnstageAttr) (values []tf.Output) {
+// Creates a handle to a CriticalSection resource.
+func CriticalSectionOp(scope *Scope, optional ...CriticalSectionOpAttr) (resource tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dtypes": dtypes}
+ attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "Unstage",
+ Type: "CriticalSectionOp",
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- if scope.Err() != nil {
- return
- }
- var idx int
- var err error
- if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
- scope.UpdateErr("Unstage", err)
- return
- }
- return values
-}
-
-// PriorityQueueV2Attr is an optional argument to PriorityQueueV2.
-type PriorityQueueV2Attr func(optionalAttr)
-
-// PriorityQueueV2ComponentTypes sets the optional component_types attribute to value.
-//
-// value: The type of each component in a value.
-// If not specified, defaults to <>
-//
-// REQUIRES: len(value) >= 0
-func PriorityQueueV2ComponentTypes(value []tf.DataType) PriorityQueueV2Attr {
- return func(m optionalAttr) {
- m["component_types"] = value
- }
-}
-
-// PriorityQueueV2Capacity sets the optional capacity attribute to value.
-//
-// value: The upper bound on the number of elements in this queue.
-// Negative numbers mean no limit.
-// If not specified, defaults to -1
-func PriorityQueueV2Capacity(value int64) PriorityQueueV2Attr {
- return func(m optionalAttr) {
- m["capacity"] = value
- }
+ return op.Output(0)
}
-// PriorityQueueV2Container sets the optional container attribute to value.
-//
-// value: If non-empty, this queue is placed in the given container.
-// Otherwise, a default container is used.
-// If not specified, defaults to ""
-func PriorityQueueV2Container(value string) PriorityQueueV2Attr {
- return func(m optionalAttr) {
- m["container"] = value
- }
-}
+// AvgPool3DAttr is an optional argument to AvgPool3D.
+type AvgPool3DAttr func(optionalAttr)
-// PriorityQueueV2SharedName sets the optional shared_name attribute to value.
+// AvgPool3DDataFormat sets the optional data_format attribute to value.
//
-// value: If non-empty, this queue will be shared under the given name
-// across multiple sessions.
-// If not specified, defaults to ""
-func PriorityQueueV2SharedName(value string) PriorityQueueV2Attr {
+// value: The data format of the input and output data. With the
+// default format "NDHWC", the data is stored in the order of:
+// [batch, in_depth, in_height, in_width, in_channels].
+// Alternatively, the format could be "NCDHW", the data storage order is:
+// [batch, in_channels, in_depth, in_height, in_width].
+// If not specified, defaults to "NDHWC"
+func AvgPool3DDataFormat(value string) AvgPool3DAttr {
return func(m optionalAttr) {
- m["shared_name"] = value
+ m["data_format"] = value
}
}
-// A queue that produces elements sorted by the first component value.
-//
-// Note that the PriorityQueue requires the first component of any element
-// to be a scalar int64, in addition to the other elements declared by
-// component_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue
-// and DequeueMany) on a PriorityQueue will all require (resp. output) one extra
-// entry in their input (resp. output) lists.
+// Performs 3D average pooling on the input.
//
// Arguments:
-// shapes: The shape of each component in a value. The length of this attr must
-// be either 0 or the same as the length of component_types. If the length of
-// this attr is 0, the shapes of queue elements are not constrained, and
-// only one element may be dequeued at a time.
+// input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.
+// ksize: 1-D tensor of length 5. The size of the window for each dimension of
+// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
+// strides: 1-D tensor of length 5. The stride of the sliding window for each
+// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
+// padding: The type of padding algorithm to use.
//
-// Returns The handle to the queue.
-func PriorityQueueV2(scope *Scope, shapes []tf.Shape, optional ...PriorityQueueV2Attr) (handle tf.Output) {
+// Returns The average pooled output tensor.
+func AvgPool3D(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPool3DAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"shapes": shapes}
+ attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "PriorityQueueV2",
-
+ Type: "AvgPool3D",
+ Input: []tf.Input{
+ input,
+ },
Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// StridedSliceAttr is an optional argument to StridedSlice.
-type StridedSliceAttr func(optionalAttr)
-
-// StridedSliceBeginMask sets the optional begin_mask attribute to value.
+// Returns element-wise remainder of division. This emulates C semantics in that
//
-// value: a bitmask where a bit i being 1 means to ignore the begin
-// value and instead use the largest interval possible. At runtime
-// begin[i] will be replaced with `[0, n-1) if `stride[i] > 0` or
-// `[-1, n-1]` if `stride[i] < 0`
-// If not specified, defaults to 0
-func StridedSliceBeginMask(value int64) StridedSliceAttr {
- return func(m optionalAttr) {
- m["begin_mask"] = value
+// the result here is consistent with a truncating divide. E.g.
+// `tf.truncatediv(x, y) * y + truncate_mod(x, y) = x`.
+//
+// *NOTE*: `Mod` supports broadcasting. More about broadcasting
+// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+func Mod(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
+ if scope.Err() != nil {
+ return
}
+ opspec := tf.OpSpec{
+ Type: "Mod",
+ Input: []tf.Input{
+ x, y,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
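+
+// A minimal usage sketch, assuming the standard tensorflow/go client packages;
+// with truncating semantics, 7 % 3 == 1 and -7 % 3 == -1:
+//
+// ```
+// s := op.NewScope()
+// x := op.Const(s.SubScope("x"), []int32{7, -7})
+// y := op.Const(s.SubScope("y"), []int32{3, 3})
+// z := op.Mod(s, x, y) // evaluates to [1, -1]
+// ```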
-// StridedSliceEndMask sets the optional end_mask attribute to value.
+// Computes square root of x element-wise.
//
-// value: analogous to `begin_mask`
-// If not specified, defaults to 0
-func StridedSliceEndMask(value int64) StridedSliceAttr {
- return func(m optionalAttr) {
- m["end_mask"] = value
+// I.e., \\(y = \sqrt{x} = x^{1/2}\\).
+func Sqrt(scope *Scope, x tf.Output) (y tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "Sqrt",
+ Input: []tf.Input{
+ x,
+ },
}
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// StridedSliceEllipsisMask sets the optional ellipsis_mask attribute to value.
+// Computes the gradients of 3-D convolution with respect to the filter.
//
-// value: a bitmask where bit `i` being 1 means the `i`th
-// position is actually an ellipsis. One bit at most can be 1.
-// If `ellipsis_mask == 0`, then an implicit ellipsis mask of `1 << (m+1)`
-// is provided. This means that `foo[3:5] == foo[3:5, ...]`. An ellipsis
-// implicitly creates as many range specifications as necessary to fully
-// specify the sliced range for every dimension. For example for a 4-dimensional
-// tensor `foo` the slice `foo[2, ..., 5:8]` implies `foo[2, :, :, 5:8]`.
-// If not specified, defaults to 0
-func StridedSliceEllipsisMask(value int64) StridedSliceAttr {
- return func(m optionalAttr) {
- m["ellipsis_mask"] = value
+// DEPRECATED at GraphDef version 10: Use Conv3DBackpropFilterV2
+//
+// Arguments:
+// input: Shape `[batch, depth, rows, cols, in_channels]`.
+// filter: Shape `[depth, rows, cols, in_channels, out_channels]`.
+// `in_channels` must match between `input` and `filter`.
+// out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
+// out_channels]`.
+// strides: 1-D tensor of length 5. The stride of the sliding window for each
+// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
+// padding: The type of padding algorithm to use.
+func Conv3DBackpropFilter(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string) (output tf.Output) {
+ if scope.Err() != nil {
+ return
}
+ attrs := map[string]interface{}{"strides": strides, "padding": padding}
+ opspec := tf.OpSpec{
+ Type: "Conv3DBackpropFilter",
+ Input: []tf.Input{
+ input, filter, out_backprop,
+ },
+ Attrs: attrs,
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// StridedSliceNewAxisMask sets the optional new_axis_mask attribute to value.
+// Computes the gradient for the rsqrt of `x` wrt its input.
//
-// value: a bitmask where bit `i` being 1 means the `i`th
-// specification creates a new shape 1 dimension. For example
-// `foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor.
-// If not specified, defaults to 0
-func StridedSliceNewAxisMask(value int64) StridedSliceAttr {
- return func(m optionalAttr) {
- m["new_axis_mask"] = value
+// Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and `dy`
+// is the corresponding input gradient.
+func RsqrtGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "RsqrtGrad",
+ Input: []tf.Input{
+ y, dy,
+ },
}
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// StridedSliceShrinkAxisMask sets the optional shrink_axis_mask attribute to value.
+// ReverseSequenceAttr is an optional argument to ReverseSequence.
+type ReverseSequenceAttr func(optionalAttr)
+
+// ReverseSequenceBatchDim sets the optional batch_dim attribute to value.
//
-// value: a bitmask where bit `i` implies that the `i`th
-// specification should shrink the dimensionality. begin and end
-// must imply a slice of size 1 in the dimension. For example in
-// python one might do `foo[:, 3, :]` which would result in
-// `shrink_axis_mask` being 2.
+// value: The dimension along which reversal is performed.
// If not specified, defaults to 0
-func StridedSliceShrinkAxisMask(value int64) StridedSliceAttr {
+func ReverseSequenceBatchDim(value int64) ReverseSequenceAttr {
return func(m optionalAttr) {
- m["shrink_axis_mask"] = value
+ m["batch_dim"] = value
}
}
-// Return a strided slice from `input`.
-//
-// Note, most python users will want to use the Python `Tensor.__getitem__`
-// or `Variable.__getitem__` rather than this op directly.
-//
-// The goal of this op is to produce a new tensor with a subset of
-// the elements from the `n` dimensional `input` tensor. The subset is chosen using
-// a sequence of `m` sparse range specifications encoded into the arguments
-// of this function. Note, in some cases
-// `m` could be equal to `n`, but this need not be the case. Each
-// range specification entry can be one of the following:
-//
-// - An ellipsis (...). Ellipses are used to imply zero or more
-// dimensions of full-dimension selection and are produced using
-// `ellipsis_mask`. For example, `foo[...]` is the identity slice.
-//
-// - A new axis. This is used to insert a new shape=1 dimension and is
-// produced using `new_axis_mask`. For example, `foo[:, ...]` where
-// `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor.
+// Reverses variable length slices.
//
+// This op first slices `input` along the dimension `batch_dim`, and for each
+// slice `i`, reverses the first `seq_lengths[i]` elements along
+// the dimension `seq_dim`.
//
-// - A range `begin:end:stride`. This is used to specify how much to choose from
-// a given dimension. `stride` can be any integer but 0. `begin` is an integer
-// which represents the index of the first value to select while `end` represents
-// the index of the last value to select. The number of values selected in each
-// dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`.
-// `begin` and `end` can be negative where `-1` is the last element, `-2` is
-// the second to last. `begin_mask` controls whether to replace the explicitly
-// given `begin` with an implicit effective value of `0` if `stride > 0` and
-// `-1` if `stride < 0`. `end_mask` is analogous but produces the number
-// required to create the largest open interval. For example, given a shape
-// `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do
-// not assume this is equivalent to `foo[0:-1]` which has an effective `begin`
-// and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the
-// first dimension of a tensor while dropping the last two (in the original
-// order elements). For example `foo = [1,2,3,4]; foo[-2::-1]` is `[4,3]`.
+// The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`,
+// and `seq_lengths` must be a vector of length `input.dims[batch_dim]`.
//
-// - A single index. This is used to keep only elements that have a given
-// index. For example (`foo[2, :]` on a shape `(5,6)` tensor produces a
-// shape `(6,)` tensor. This is encoded in `begin` and `end` and
-// `shrink_axis_mask`.
+// The output slice `i` along dimension `batch_dim` is then given by input
+// slice `i`, with the first `seq_lengths[i]` slices along dimension
+// `seq_dim` reversed.
//
-// Each conceptual range specification is encoded in the op's argument. This
-// encoding is best understand by considering a non-trivial example. In
-// particular,
-// `foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as
+// For example:
//
// ```
-// begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0)
-// end = [2, 4, x, x, -3, x]
-// strides = [1, 1, x, x, -1, 1]
-// begin_mask = 1<<4 | 1 << 5 = 48
-// end_mask = 1<<5 = 32
-// ellipsis_mask = 1<<3 = 8
-// new_axis_mask = 1<<2 4
-// shrink_axis_mask = 1<<0
-// ```
-//
-// In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of
-// the slice becomes (2, 1, 5, 5, 2, 5).
-// Let us walk step by step through each argument specification.
-//
-// 1. The first argument in the example slice is turned into `begin = 1` and
-// `end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we
-// also set the appropriate bit in `shrink_axis_mask`.
+// # Given this:
+// batch_dim = 0
+// seq_dim = 1
+// input.dims = (4, 8, ...)
+// seq_lengths = [7, 2, 3, 5]
//
-// 2. `2:4` is contributes 2, 4, 1 to begin, end, and stride. All masks have
-// zero bits contributed.
+// # then slices of input are reversed on seq_dim, but only up to seq_lengths:
+// output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]
+// output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...]
+// output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...]
+// output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]
//
-// 3. None is a synonym for `tf.newaxis`. This means insert a dimension of size 1
-// dimension in the final shape. Dummy values are contributed to begin,
-// end and stride, while the new_axis_mask bit is set.
+// # while entries past seq_lens are copied through:
+// output[0, 7:, :, ...] = input[0, 7:, :, ...]
+// output[1, 2:, :, ...] = input[1, 2:, :, ...]
+// output[2, 3:, :, ...] = input[2, 3:, :, ...]
+// output[3, 5:, :, ...] = input[3, 5:, :, ...]
+// ```
//
-// 4. `...` grab the full ranges from as many dimensions as needed to
-// fully specify a slice for every dimension of the input shape.
+// In contrast, if:
//
-// 5. `:-3:-1` shows the use of negative indices. A negative index `i` associated
-// with a dimension that has shape `s` is converted to a positive index
-// `s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion
-// is done internally so begin, end and strides receive x, -3, and -1.
-// The appropriate begin_mask bit is set to indicate the start range is the
-// full range (ignoring the x).
+// ```
+// # Given this:
+// batch_dim = 2
+// seq_dim = 0
+// input.dims = (8, ?, 4, ...)
+// seq_lengths = [7, 2, 3, 5]
//
-// 6. `:` indicates that the entire contents of the corresponding dimension
-// is selected. This is equivalent to `::` or `0::1`. begin, end, and strides
-// receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and
-// `end_mask` are also set.
+// # then slices of input are reversed on seq_dim, but only up to seq_lengths:
+// output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]
+// output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...]
+// output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...]
+// output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]
//
-// *Requirements*:
-// `0 != strides[i] for i in [0, m)`
-// `ellipsis_mask must be a power of two (only one ellipsis)`
+// # while entries past seq_lens are copied through:
+// output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]
+// output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]
+// output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]
+// output[5:, :, 3, :, ...] = input[5:, :, 3, :, ...]
+// ```
//
// Arguments:
+// input: The input to reverse.
+// seq_lengths: 1-D with length `input.dims(batch_dim)` and
+// `max(seq_lengths) <= input.dims(seq_dim)`
+// seq_dim: The dimension which is partially reversed.
//
-// begin: `begin[k]` specifies the offset into the `k`th range specification.
-// The exact dimension this corresponds to will be determined by context.
-// Out-of-bounds values will be silently clamped. If the `k`th bit of
-// `begin_mask` then `begin[k]` is ignored and the full range of the
-// appropriate dimension is used instead. Negative values causes indexing
-// to start from the highest element e.g. If `foo==[1,2,3]` then `foo[-1]==3`.
-// end: `end[i]` is like `begin` with the exception that `end_mask` is
-// used to determine full ranges.
-// strides: `strides[i]` specifies the increment in the `i`th specification
-// after extracting a given element. Negative indices will reverse
-// the original order. Out or range values are
-// clamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0`
-func StridedSlice(scope *Scope, input tf.Output, begin tf.Output, end tf.Output, strides tf.Output, optional ...StridedSliceAttr) (output tf.Output) {
+// Returns The partially reversed input. It has the same shape as `input`.
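+//
+// A minimal graph-building sketch (illustrative; it assumes the generated
+// `op` package and a scope `s := op.NewScope()`):
+//
+// ```
+// input := op.Const(s, [][]int32{{1, 2, 3, 4}, {5, 6, 7, 8}})
+// seqLens := op.Const(s, []int64{3, 2})
+// out := op.ReverseSequence(s, input, seqLens, 1)
+// // With batch_dim = 0 (the default) and seq_dim = 1, `out` evaluates to
+// // [[3 2 1 4] [6 5 7 8]]: the first seq_lengths[i] entries of row i are
+// // reversed, the rest are copied through.
+// ```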
+func ReverseSequence(scope *Scope, input tf.Output, seq_lengths tf.Output, seq_dim int64, optional ...ReverseSequenceAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"seq_dim": seq_dim}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "StridedSlice",
+ Type: "ReverseSequence",
Input: []tf.Input{
- input, begin, end, strides,
+ input, seq_lengths,
},
Attrs: attrs,
}
@@ -5186,96 +4664,111 @@ func StridedSlice(scope *Scope, input tf.Output, begin tf.Output, end tf.Output,
return op.Output(0)
}
-// Interleave the values from the `data` tensors into a single tensor.
-//
-// Builds a merged tensor such that
+// DepthwiseConv2dNativeAttr is an optional argument to DepthwiseConv2dNative.
+type DepthwiseConv2dNativeAttr func(optionalAttr)
+
+// DepthwiseConv2dNativeDataFormat sets the optional data_format attribute to value.
//
-// ```python
-// merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
-// ```
+// value: Specify the data format of the input and output data. With the
+// default format "NHWC", the data is stored in the order of:
+// [batch, height, width, channels].
+// Alternatively, the format could be "NCHW", the data storage order of:
+// [batch, channels, height, width].
+// If not specified, defaults to "NHWC"
+func DepthwiseConv2dNativeDataFormat(value string) DepthwiseConv2dNativeAttr {
+ return func(m optionalAttr) {
+ m["data_format"] = value
+ }
+}
+
+// DepthwiseConv2dNativeDilations sets the optional dilations attribute to value.
//
-// For example, if each `indices[m]` is scalar or vector, we have
+// value: 1-D tensor of length 4. The dilation factor for each dimension of
+// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
+// element on that dimension. The dimension order is determined by the value of
+// `data_format`, see above for details. Dilations in the batch and depth
+// dimensions must be 1.
+// If not specified, defaults to <i:1 i:1 i:1 i:1 >
+func DepthwiseConv2dNativeDilations(value []int64) DepthwiseConv2dNativeAttr {
+ return func(m optionalAttr) {
+ m["dilations"] = value
+ }
+}
+
+// Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors.
//
-// ```python
-// # Scalar indices:
-// merged[indices[m], ...] = data[m][...]
+// Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
+// and a filter / kernel tensor of shape
+// `[filter_height, filter_width, in_channels, channel_multiplier]`, containing
+// `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies
+// a different filter to each input channel (expanding from 1 channel to
+// `channel_multiplier` channels for each), then concatenates the results
+// together. Thus, the output has `in_channels * channel_multiplier` channels.
//
-// # Vector indices:
-// merged[indices[m][i], ...] = data[m][i, ...]
// ```
-//
-// Each `data[i].shape` must start with the corresponding `indices[i].shape`,
-// and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we
-// must have `data[i].shape = indices[i].shape + constant`. In terms of this
-// `constant`, the output shape is
-//
-// merged.shape = [max(indices)] + constant
-//
-// Values may be merged in parallel, so if an index appears in both `indices[m][i]`
-// and `indices[n][j]`, the result may be invalid. This differs from the normal
-// DynamicStitch operator that defines the behavior in that case.
-//
-// For example:
-//
-// ```python
-// indices[0] = 6
-// indices[1] = [4, 1]
-// indices[2] = [[5, 2], [0, 3]]
-// data[0] = [61, 62]
-// data[1] = [[41, 42], [11, 12]]
-// data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
-// merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
-// [51, 52], [61, 62]]
+// for k in 0..in_channels-1
+// for q in 0..channel_multiplier-1
+// output[b, i, j, k * channel_multiplier + q] =
+// sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
+// filter[di, dj, k, q]
// ```
//
-// This method can be used to merge partitions created by `dynamic_partition`
-// as illustrated on the following example:
+// Must have `strides[0] = strides[3] = 1`. For the most common case of the same
+// horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
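+//
+// A shape-only sketch (illustrative; assumes a scope `s`, the generated `op`
+// package, and `input`/`filter` tensors of the shapes shown):
+//
+// ```
+// // input: [1, 4, 4, 3], filter: [2, 2, 3, 2]  =>  output: [1, 3, 3, 6]
+// out := op.DepthwiseConv2dNative(s, input, filter,
+//	[]int64{1, 1, 1, 1}, "VALID")
+// ```
+//
+// With "VALID" padding the spatial size is (4 - 2) + 1 = 3, and the channel
+// count is `in_channels * channel_multiplier = 3 * 2 = 6`.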
//
-// ```python
-// # Apply function (increments x_i) on elements for which a certain condition
-// # apply (x_i != -1 in this example).
-// x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
-// condition_mask=tf.not_equal(x,tf.constant(-1.))
-// partitioned_data = tf.dynamic_partition(
-// x, tf.cast(condition_mask, tf.int32) , 2)
-// partitioned_data[1] = partitioned_data[1] + 1.0
-// condition_indices = tf.dynamic_partition(
-// tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2)
-// x = tf.dynamic_stitch(condition_indices, partitioned_data)
-// # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
-// # unchanged.
-// ```
+// Arguments:
//
-// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
-// <img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
-// </div>
-func ParallelDynamicStitch(scope *Scope, indices []tf.Output, data []tf.Output) (merged tf.Output) {
+//
+// strides: 1-D of length 4. The stride of the sliding window for each dimension
+// of `input`.
+// padding: The type of padding algorithm to use.
+func DepthwiseConv2dNative(scope *Scope, input tf.Output, filter tf.Output, strides []int64, padding string, optional ...DepthwiseConv2dNativeAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"strides": strides, "padding": padding}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "ParallelDynamicStitch",
+ Type: "DepthwiseConv2dNative",
Input: []tf.Input{
- tf.OutputList(indices), tf.OutputList(data),
+ input, filter,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// TensorArrayGatherV2Attr is an optional argument to TensorArrayGatherV2.
-type TensorArrayGatherV2Attr func(optionalAttr)
+// TensorArrayGatherV3Attr is an optional argument to TensorArrayGatherV3.
+type TensorArrayGatherV3Attr func(optionalAttr)
-// TensorArrayGatherV2ElementShape sets the optional element_shape attribute to value.
+// TensorArrayGatherV3ElementShape sets the optional element_shape attribute to value.
+//
+// value: The expected shape of an element, if known. Used to
+// validate the shapes of TensorArray elements. If this shape is not
+// fully specified, gathering zero-size TensorArrays is an error.
// If not specified, defaults to <unknown_rank:true >
-func TensorArrayGatherV2ElementShape(value tf.Shape) TensorArrayGatherV2Attr {
+func TensorArrayGatherV3ElementShape(value tf.Shape) TensorArrayGatherV3Attr {
return func(m optionalAttr) {
m["element_shape"] = value
}
}
-// Deprecated. Use TensorArrayGatherV3
-func TensorArrayGatherV2(scope *Scope, handle tf.Output, indices tf.Output, flow_in tf.Output, dtype tf.DataType, optional ...TensorArrayGatherV2Attr) (value tf.Output) {
+// Gather specific elements from the TensorArray into output `value`.
+//
+// All elements selected by `indices` must have the same shape.
+//
+// Arguments:
+// handle: The handle to a TensorArray.
+// indices: The locations in the TensorArray from which to read tensor elements.
+// flow_in: A float scalar that enforces proper chaining of operations.
+// dtype: The type of the elements that are returned.
+//
+// Returns All of the elements in the TensorArray, concatenated along a new
+// axis (the new dimension 0).
+func TensorArrayGatherV3(scope *Scope, handle tf.Output, indices tf.Output, flow_in tf.Output, dtype tf.DataType, optional ...TensorArrayGatherV3Attr) (value tf.Output) {
if scope.Err() != nil {
return
}
@@ -5284,7 +4777,7 @@ func TensorArrayGatherV2(scope *Scope, handle tf.Output, indices tf.Output, flow
a(attrs)
}
opspec := tf.OpSpec{
- Type: "TensorArrayGatherV2",
+ Type: "TensorArrayGatherV3",
Input: []tf.Input{
handle, indices, flow_in,
},
@@ -5294,245 +4787,277 @@ func TensorArrayGatherV2(scope *Scope, handle tf.Output, indices tf.Output, flow
return op.Output(0)
}
-// Interleave the values from the `data` tensors into a single tensor.
-//
-// Builds a merged tensor such that
-//
-// ```python
-// merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
-// ```
-//
-// For example, if each `indices[m]` is scalar or vector, we have
-//
-// ```python
-// # Scalar indices:
-// merged[indices[m], ...] = data[m][...]
-//
-// # Vector indices:
-// merged[indices[m][i], ...] = data[m][i, ...]
-// ```
-//
-// Each `data[i].shape` must start with the corresponding `indices[i].shape`,
-// and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we
-// must have `data[i].shape = indices[i].shape + constant`. In terms of this
-// `constant`, the output shape is
-//
-// merged.shape = [max(indices)] + constant
-//
-// Values are merged in order, so if an index appears in both `indices[m][i]` and
-// `indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the
-// merged result. If you do not need this guarantee, ParallelDynamicStitch might
-// perform better on some devices.
-//
-// For example:
-//
-// ```python
-// indices[0] = 6
-// indices[1] = [4, 1]
-// indices[2] = [[5, 2], [0, 3]]
-// data[0] = [61, 62]
-// data[1] = [[41, 42], [11, 12]]
-// data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
-// merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
-// [51, 52], [61, 62]]
-// ```
+// Converts each string in the input Tensor to its hash, modulo a number of buckets.
//
-// This method can be used to merge partitions created by `dynamic_partition`
-// as illustrated on the following example:
+// The hash function is deterministic on the content of the string within the
+// process and will never change. However, it is not suitable for cryptography.
+// This function may be used when CPU time is scarce and inputs are trusted or
+// unimportant. There is a risk of adversaries constructing inputs that all hash
+// to the same bucket. To prevent this problem, use a strong hash function with
+// `tf.string_to_hash_bucket_strong`.
//
-// ```python
-// # Apply function (increments x_i) on elements for which a certain condition
-// # apply (x_i != -1 in this example).
-// x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
-// condition_mask=tf.not_equal(x,tf.constant(-1.))
-// partitioned_data = tf.dynamic_partition(
-// x, tf.cast(condition_mask, tf.int32) , 2)
-// partitioned_data[1] = partitioned_data[1] + 1.0
-// condition_indices = tf.dynamic_partition(
-// tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2)
-// x = tf.dynamic_stitch(condition_indices, partitioned_data)
-// # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
-// # unchanged.
-// ```
+// Arguments:
+// input: The strings to assign a hash bucket.
+// num_buckets: The number of buckets.
//
-// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
-// <img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
-// </div>
-func DynamicStitch(scope *Scope, indices []tf.Output, data []tf.Output) (merged tf.Output) {
+// Returns A Tensor of the same shape as the input tensor.
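+//
+// A minimal sketch (illustrative; assumes a scope `s` and the generated
+// `op` package):
+//
+// ```
+// words := op.Const(s, []string{"hello", "world"})
+// buckets := op.StringToHashBucketFast(s, words, 100)
+// // buckets is an int64 tensor of shape [2] with values in [0, 100);
+// // the same string always maps to the same bucket within a process.
+// ```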
+func StringToHashBucketFast(scope *Scope, input tf.Output, num_buckets int64) (output tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"num_buckets": num_buckets}
opspec := tf.OpSpec{
- Type: "DynamicStitch",
+ Type: "StringToHashBucketFast",
Input: []tf.Input{
- tf.OutputList(indices), tf.OutputList(data),
+ input,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Produces a summary of any statistics recorded by the given statistics manager.
-func StatsAggregatorSummary(scope *Scope, iterator tf.Output) (summary tf.Output) {
+// Returns the max of x and y (i.e. x > y ? x : y) element-wise.
+//
+// *NOTE*: `Maximum` supports broadcasting. More about broadcasting
+// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
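+//
+// For example (illustrative), `Maximum` of `[1, 4, 3]` and a scalar `2`
+// broadcasts the scalar and yields `[2, 4, 3]`.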
+func Maximum(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "StatsAggregatorSummary",
+ Type: "Maximum",
Input: []tf.Input{
- iterator,
+ x, y,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// FIFOQueueV2Attr is an optional argument to FIFOQueueV2.
-type FIFOQueueV2Attr func(optionalAttr)
-
-// FIFOQueueV2Shapes sets the optional shapes attribute to value.
+// Outputs all keys and values in the table.
//
-// value: The shape of each component in a value. The length of this attr must
-// be either 0 or the same as the length of component_types. If the length of
-// this attr is 0, the shapes of queue elements are not constrained, and
-// only one element may be dequeued at a time.
-// If not specified, defaults to <>
+// Arguments:
+// table_handle: Handle to the table.
//
-// REQUIRES: len(value) >= 0
-func FIFOQueueV2Shapes(value []tf.Shape) FIFOQueueV2Attr {
- return func(m optionalAttr) {
- m["shapes"] = value
- }
-}
-
-// FIFOQueueV2Capacity sets the optional capacity attribute to value.
//
-// value: The upper bound on the number of elements in this queue.
-// Negative numbers mean no limit.
-// If not specified, defaults to -1
-func FIFOQueueV2Capacity(value int64) FIFOQueueV2Attr {
- return func(m optionalAttr) {
- m["capacity"] = value
+//
+// Returns a vector of all keys present in the table, and a tensor of all
+// values in the table, indexed in parallel with `keys`.
+func LookupTableExportV2(scope *Scope, table_handle tf.Output, Tkeys tf.DataType, Tvalues tf.DataType) (keys tf.Output, values tf.Output) {
+ if scope.Err() != nil {
+ return
}
+ attrs := map[string]interface{}{"Tkeys": Tkeys, "Tvalues": Tvalues}
+ opspec := tf.OpSpec{
+ Type: "LookupTableExportV2",
+ Input: []tf.Input{
+ table_handle,
+ },
+ Attrs: attrs,
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0), op.Output(1)
}
-// FIFOQueueV2Container sets the optional container attribute to value.
+// Real-valued fast Fourier transform.
//
-// value: If non-empty, this queue is placed in the given container.
-// Otherwise, a default container is used.
-// If not specified, defaults to ""
-func FIFOQueueV2Container(value string) FIFOQueueV2Attr {
- return func(m optionalAttr) {
- m["container"] = value
+// Computes the 1-dimensional discrete Fourier transform of a real-valued signal
+// over the inner-most dimension of `input`.
+//
+// Since the DFT of a real signal is Hermitian-symmetric, `RFFT` only returns the
+// `fft_length / 2 + 1` unique components of the FFT: the zero-frequency term,
+// followed by the `fft_length / 2` positive-frequency terms.
+//
+// Along the axis `RFFT` is computed on, if `fft_length` is smaller than the
+// corresponding dimension of `input`, the dimension is cropped. If it is larger,
+// the dimension is padded with zeros.
+//
+// Arguments:
+// input: A float32 tensor.
+// fft_length: An int32 tensor of shape [1]. The FFT length.
+//
+// Returns A complex64 tensor of the same rank as `input`. The inner-most
+// dimension of `input` is replaced with the `fft_length / 2 + 1` unique
+// frequency components of its 1D Fourier transform.
+//
+// @compatibility(numpy)
+// Equivalent to np.fft.rfft
+// @end_compatibility
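+//
+// For example (illustrative): a length-8 float32 signal with `fft_length = [8]`
+// yields `8 / 2 + 1 = 5` complex64 values: the zero-frequency term followed by
+// 4 positive-frequency terms.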
+func RFFT(scope *Scope, input tf.Output, fft_length tf.Output) (output tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "RFFT",
+ Input: []tf.Input{
+ input, fft_length,
+ },
}
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// FIFOQueueV2SharedName sets the optional shared_name attribute to value.
-//
-// value: If non-empty, this queue will be shared under the given name
-// across multiple sessions.
-// If not specified, defaults to ""
-func FIFOQueueV2SharedName(value string) FIFOQueueV2Attr {
+// ComplexAttr is an optional argument to Complex.
+type ComplexAttr func(optionalAttr)
+
+// ComplexTout sets the optional Tout attribute to value.
+// If not specified, defaults to DT_COMPLEX64
+func ComplexTout(value tf.DataType) ComplexAttr {
return func(m optionalAttr) {
- m["shared_name"] = value
+ m["Tout"] = value
}
}
-// A queue that produces elements in first-in first-out order.
+// Converts two real numbers to a complex number.
//
-// Arguments:
-// component_types: The type of each component in a value.
+// Given a tensor `real` representing the real part of a complex number, and a
+// tensor `imag` representing the imaginary part of a complex number, this
+// operation returns complex numbers elementwise of the form \\(a + bj\\), where
+// *a* represents the `real` part and *b* represents the `imag` part.
//
-// Returns The handle to the queue.
-func FIFOQueueV2(scope *Scope, component_types []tf.DataType, optional ...FIFOQueueV2Attr) (handle tf.Output) {
+// The input tensors `real` and `imag` must have the same shape.
+//
+// For example:
+//
+// ```
+// # tensor 'real' is [2.25, 3.25]
+// # tensor `imag` is [4.75, 5.75]
+// tf.complex(real, imag) ==> [2.25 + 4.75j, 3.25 + 5.75j]
+// ```
+func Complex(scope *Scope, real tf.Output, imag tf.Output, optional ...ComplexAttr) (out tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"component_types": component_types}
+ attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "FIFOQueueV2",
-
+ Type: "Complex",
+ Input: []tf.Input{
+ real, imag,
+ },
Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Converts the given `resource_handle` representing an iterator to a variant tensor.
+// ImagAttr is an optional argument to Imag.
+type ImagAttr func(optionalAttr)
+
+// ImagTout sets the optional Tout attribute to value.
+// If not specified, defaults to DT_FLOAT
+func ImagTout(value tf.DataType) ImagAttr {
+ return func(m optionalAttr) {
+ m["Tout"] = value
+ }
+}
+
+// Returns the imaginary part of a complex number.
//
-// Arguments:
-// resource_handle: A handle to an iterator resource.
+// Given a tensor `input` of complex numbers, this operation returns a tensor of
+// type `float` that is the imaginary part of each element in `input`. All
+// elements in `input` must be complex numbers of the form \\(a + bj\\), where *a*
+// is the real part and *b* is the imaginary part returned by this operation.
//
-// Returns A variant tensor storing the state of the iterator contained in the
-// resource.
-func SerializeIterator(scope *Scope, resource_handle tf.Output) (serialized tf.Output) {
+// For example:
+//
+// ```
+// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
+// tf.imag(input) ==> [4.75, 5.75]
+// ```
+func Imag(scope *Scope, input tf.Output, optional ...ImagAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "SerializeIterator",
+ Type: "Imag",
Input: []tf.Input{
- resource_handle,
+ input,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Return a tensor with the same shape and contents as the input tensor or value.
-func Identity(scope *Scope, input tf.Output) (output tf.Output) {
+// Compute the Hurwitz zeta function \\(\zeta(x, q)\\).
+//
+// The Hurwitz zeta function is defined as:
+//
+//
+// \\(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\\)
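+//
+// For example, \\(\zeta(x, 1)\\) reduces to the Riemann zeta function, so
+// evaluating the op with `x = 2`, `q = 1` yields \\(\pi^2 / 6 \approx 1.6449\\).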
+func Zeta(scope *Scope, x tf.Output, q tf.Output) (z tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Identity",
+ Type: "Zeta",
Input: []tf.Input{
- input,
+ x, q,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// IteratorFromStringHandleAttr is an optional argument to IteratorFromStringHandle.
-type IteratorFromStringHandleAttr func(optionalAttr)
+// LRNGradAttr is an optional argument to LRNGrad.
+type LRNGradAttr func(optionalAttr)
-// IteratorFromStringHandleOutputTypes sets the optional output_types attribute to value.
+// LRNGradDepthRadius sets the optional depth_radius attribute to value.
//
-// value: If specified, defines the type of each tuple component in an
-// element produced by the resulting iterator.
-// If not specified, defaults to <>
+// value: A depth radius.
+// If not specified, defaults to 5
+func LRNGradDepthRadius(value int64) LRNGradAttr {
+ return func(m optionalAttr) {
+ m["depth_radius"] = value
+ }
+}
+
+// LRNGradBias sets the optional bias attribute to value.
//
-// REQUIRES: len(value) >= 0
-func IteratorFromStringHandleOutputTypes(value []tf.DataType) IteratorFromStringHandleAttr {
+// value: An offset (usually > 0 to avoid dividing by 0).
+// If not specified, defaults to 1
+func LRNGradBias(value float32) LRNGradAttr {
return func(m optionalAttr) {
- m["output_types"] = value
+ m["bias"] = value
}
}
-// IteratorFromStringHandleOutputShapes sets the optional output_shapes attribute to value.
+// LRNGradAlpha sets the optional alpha attribute to value.
//
-// value: If specified, defines the shape of each tuple component in an
-// element produced by the resulting iterator.
-// If not specified, defaults to <>
+// value: A scale factor, usually positive.
+// If not specified, defaults to 1
+func LRNGradAlpha(value float32) LRNGradAttr {
+ return func(m optionalAttr) {
+ m["alpha"] = value
+ }
+}
+
+// LRNGradBeta sets the optional beta attribute to value.
//
-// REQUIRES: len(value) >= 0
-func IteratorFromStringHandleOutputShapes(value []tf.Shape) IteratorFromStringHandleAttr {
+// value: An exponent.
+// If not specified, defaults to 0.5
+func LRNGradBeta(value float32) LRNGradAttr {
return func(m optionalAttr) {
- m["output_shapes"] = value
+ m["beta"] = value
}
}
-// Converts the given string representing a handle to an iterator to a resource.
+// Gradients for Local Response Normalization.
//
// Arguments:
-// string_handle: A string representation of the given handle.
+// input_grads: 4-D with shape `[batch, height, width, channels]`.
+// input_image: 4-D with shape `[batch, height, width, channels]`.
+// output_image: 4-D with shape `[batch, height, width, channels]`.
//
-// Returns A handle to an iterator resource.
-func IteratorFromStringHandle(scope *Scope, string_handle tf.Output, optional ...IteratorFromStringHandleAttr) (resource_handle tf.Output) {
+// Returns The gradients for LRN.
+func LRNGrad(scope *Scope, input_grads tf.Output, input_image tf.Output, output_image tf.Output, optional ...LRNGradAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
@@ -5541,9 +5066,9 @@ func IteratorFromStringHandle(scope *Scope, string_handle tf.Output, optional ..
a(attrs)
}
opspec := tf.OpSpec{
- Type: "IteratorFromStringHandle",
+ Type: "LRNGrad",
Input: []tf.Input{
- string_handle,
+ input_grads, input_image, output_image,
},
Attrs: attrs,
}
@@ -5551,21 +5076,33 @@ func IteratorFromStringHandle(scope *Scope, string_handle tf.Output, optional ..
return op.Output(0)
}
-// ShapeNAttr is an optional argument to ShapeN.
-type ShapeNAttr func(optionalAttr)
+// AnyAttr is an optional argument to Any.
+type AnyAttr func(optionalAttr)
-// ShapeNOutType sets the optional out_type attribute to value.
-// If not specified, defaults to DT_INT32
-func ShapeNOutType(value tf.DataType) ShapeNAttr {
+// AnyKeepDims sets the optional keep_dims attribute to value.
+//
+// value: If true, retain reduced dimensions with length 1.
+// If not specified, defaults to false
+func AnyKeepDims(value bool) AnyAttr {
return func(m optionalAttr) {
- m["out_type"] = value
+ m["keep_dims"] = value
}
}
-// Returns shape of tensors.
+// Computes the "logical or" of elements across dimensions of a tensor.
//
-// This operation returns N 1-D integer tensors representing shape of `input[i]s`.
-func ShapeN(scope *Scope, input []tf.Output, optional ...ShapeNAttr) (output []tf.Output) {
+// Reduces `input` along the dimensions given in `axis`. Unless
+// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+// `axis`. If `keep_dims` is true, the reduced dimensions are
+// retained with length 1.
+//
+// Arguments:
+// input: The tensor to reduce.
+// axis: The dimensions to reduce. Must be in the range
+// `[-rank(input), rank(input))`.
+//
+// Returns The reduced tensor.
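+//
+// A minimal sketch (illustrative; assumes a scope `s` and the generated
+// `op` package):
+//
+// ```
+// t := op.Const(s, [][]bool{{true, false}, {false, false}})
+// rows := op.Any(s, t, op.Const(s, []int32{1}))
+// // rows evaluates to [true false]: row 0 contains a true, row 1 does not.
+// ```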
+func Any(scope *Scope, input tf.Output, axis tf.Output, optional ...AnyAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
@@ -5574,88 +5111,227 @@ func ShapeN(scope *Scope, input []tf.Output, optional ...ShapeNAttr) (output []t
a(attrs)
}
opspec := tf.OpSpec{
- Type: "ShapeN",
+ Type: "Any",
Input: []tf.Input{
- tf.OutputList(input),
+ input, axis,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
+// ResourceApplyFtrlAttr is an optional argument to ResourceApplyFtrl.
+type ResourceApplyFtrlAttr func(optionalAttr)
+
+// ResourceApplyFtrlUseLocking sets the optional use_locking attribute to value.
+//
+// value: If `True`, updating of the var and accum tensors will be protected
+// by a lock; otherwise the behavior is undefined, but may exhibit less
+// contention.
+// If not specified, defaults to false
+func ResourceApplyFtrlUseLocking(value bool) ResourceApplyFtrlAttr {
+ return func(m optionalAttr) {
+ m["use_locking"] = value
+ }
+}
+
+// Update '*var' according to the Ftrl-proximal scheme.
+//
+// accum_new = accum + grad * grad
+// linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
+// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
+// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
+// accum = accum_new
+//
+// Arguments:
+// var_: Should be from a Variable().
+// accum: Should be from a Variable().
+// linear: Should be from a Variable().
+// grad: The gradient.
+// lr: Scaling factor. Must be a scalar.
+// l1: L1 regularization. Must be a scalar.
+// l2: L2 regularization. Must be a scalar.
+// lr_power: Scaling factor. Must be a scalar.
+//
+// Returns the created operation.
+func ResourceApplyFtrl(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, lr_power tf.Output, optional ...ResourceApplyFtrlAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
- var idx int
- var err error
- if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
- scope.UpdateErr("ShapeN", err)
- return
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
}
- return output
+ opspec := tf.OpSpec{
+ Type: "ResourceApplyFtrl",
+ Input: []tf.Input{
+ var_, accum, linear, grad, lr, l1, l2, lr_power,
+ },
+ Attrs: attrs,
+ }
+ return scope.AddOperation(opspec)
}
-// Converts the given `resource_handle` representing an iterator to a string.
+// RandomUniformAttr is an optional argument to RandomUniform.
+type RandomUniformAttr func(optionalAttr)
+
+// RandomUniformSeed sets the optional seed attribute to value.
+//
+// value: If either `seed` or `seed2` are set to be non-zero, the random number
+// generator is seeded by the given seed. Otherwise, it is seeded by a
+// random seed.
+// If not specified, defaults to 0
+func RandomUniformSeed(value int64) RandomUniformAttr {
+ return func(m optionalAttr) {
+ m["seed"] = value
+ }
+}
+
+// RandomUniformSeed2 sets the optional seed2 attribute to value.
+//
+// value: A second seed to avoid seed collision.
+// If not specified, defaults to 0
+func RandomUniformSeed2(value int64) RandomUniformAttr {
+ return func(m optionalAttr) {
+ m["seed2"] = value
+ }
+}
+
+// Outputs random values from a uniform distribution.
+//
+// The generated values follow a uniform distribution in the range `[0, 1)`. The
+// lower bound 0 is included in the range, while the upper bound 1 is excluded.
//
// Arguments:
-// resource_handle: A handle to an iterator resource.
+// shape: The shape of the output tensor.
+// dtype: The type of the output.
//
-// Returns A string representation of the given handle.
-func IteratorToStringHandle(scope *Scope, resource_handle tf.Output) (string_handle tf.Output) {
+// Returns A tensor of the specified shape filled with uniform random values.
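+//
+// To sample from a different range `[a, b)` (illustrative), rescale the
+// output `u` as `a + (b - a) * u`.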
+func RandomUniform(scope *Scope, shape tf.Output, dtype tf.DataType, optional ...RandomUniformAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"dtype": dtype}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "IteratorToStringHandle",
+ Type: "RandomUniform",
Input: []tf.Input{
- resource_handle,
+ shape,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Outputs the single element from the given dataset.
+// AssertAttr is an optional argument to Assert.
+type AssertAttr func(optionalAttr)
+
+// AssertSummarize sets the optional summarize attribute to value.
//
-// Arguments:
-// dataset: A handle to a dataset that contains a single element.
+// value: Print this many entries of each tensor.
+// If not specified, defaults to 3
+func AssertSummarize(value int64) AssertAttr {
+ return func(m optionalAttr) {
+ m["summarize"] = value
+ }
+}
+
+// Asserts that the given condition is true.
//
+// If `condition` evaluates to false, print the list of tensors in `data`.
+// `summarize` determines how many entries of the tensors to print.
//
+// Arguments:
+// condition: The condition to evaluate.
+// data: The tensors to print out when condition is false.
//
-// Returns The components of the single element of `input`.
-func DatasetToSingleElement(scope *Scope, dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output) {
+// Returns the created operation.
+func Assert(scope *Scope, condition tf.Output, data []tf.Output, optional ...AssertAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "DatasetToSingleElement",
+ Type: "Assert",
Input: []tf.Input{
- dataset,
+ condition, tf.OutputList(data),
},
Attrs: attrs,
}
- op := scope.AddOperation(opspec)
+ return scope.AddOperation(opspec)
+}
+
+// Computes element-wise population count (a.k.a. popcount, bitsum, bitcount).
+//
+// For each entry in `x`, calculates the number of `1` (on) bits in the binary
+// representation of that entry.
+//
+// **NOTE**: It is more efficient to first `tf.bitcast` your tensors into
+// `int32` or `int64` and perform the bitcount on the result, than to feed in
+// 8- or 16-bit inputs and then aggregate the resulting counts.
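+//
+// For example (illustrative): for `uint8` input `[0, 1, 2, 3, 255]` the op
+// returns `[0, 1, 1, 2, 8]`.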
+func PopulationCount(scope *Scope, x tf.Output) (y tf.Output) {
if scope.Err() != nil {
return
}
- var idx int
- var err error
- if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
- scope.UpdateErr("DatasetToSingleElement", err)
- return
+ opspec := tf.OpSpec{
+ Type: "PopulationCount",
+ Input: []tf.Input{
+ x,
+ },
}
- return components
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// Gets the next output from the given iterator.
-func IteratorGetNext(scope *Scope, iterator tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output) {
+// Split a `SparseTensor` into `num_split` tensors along one dimension.
+//
+// If `shape[split_dim]` is not an integer multiple of `num_split`, slices
+// `[0 : shape[split_dim] % num_split]` get one extra element along `split_dim`.
+// For example, if `split_dim = 1` and `num_split = 2` and the input is
+//
+// input_tensor = shape = [2, 7]
+// [ a d e ]
+// [b c ]
+//
+// Graphically the output tensors are:
+//
+// output_tensor[0] = shape = [2, 4]
+// [ a ]
+// [b c ]
+//
+// output_tensor[1] = shape = [2, 3]
+// [ d e ]
+// [ ]
+//
+// Arguments:
+// split_dim: 0-D. The dimension along which to split. Must be in the range
+// `[0, rank(shape))`.
+// indices: 2-D tensor representing the indices of the sparse tensor.
+// values: 1-D tensor representing the values of the sparse tensor.
+// shape: 1-D tensor representing the shape of the sparse tensor.
+// num_split: The number of ways to split.
+//
+// Returns a list of 1-D tensors representing the indices of the output sparse
+// tensors, a list of 1-D tensors representing the values of the output sparse
+// tensors, and a list of 1-D tensors representing the shape of the output
+// sparse tensors.
+func SparseSplit(scope *Scope, split_dim tf.Output, indices tf.Output, values tf.Output, shape tf.Output, num_split int64) (output_indices []tf.Output, output_values []tf.Output, output_shape []tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
+ attrs := map[string]interface{}{"num_split": num_split}
opspec := tf.OpSpec{
- Type: "IteratorGetNext",
+ Type: "SparseSplit",
Input: []tf.Input{
- iterator,
+ split_dim, indices, values, shape,
},
Attrs: attrs,
}
@@ -5665,124 +5341,151 @@ func IteratorGetNext(scope *Scope, iterator tf.Output, output_types []tf.DataTyp
}
var idx int
var err error
- if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
- scope.UpdateErr("IteratorGetNext", err)
+ if output_indices, idx, err = makeOutputList(op, idx, "output_indices"); err != nil {
+ scope.UpdateErr("SparseSplit", err)
return
}
- return components
-}
-
-// Makes a new iterator from the given `dataset` and stores it in `iterator`.
-//
-// This operation may be executed multiple times. Each execution will reset the
-// iterator in `iterator` to the first element of `dataset`.
-//
-// Returns the created operation.
-func MakeIterator(scope *Scope, dataset tf.Output, iterator tf.Output) (o *tf.Operation) {
- if scope.Err() != nil {
+ if output_values, idx, err = makeOutputList(op, idx, "output_values"); err != nil {
+ scope.UpdateErr("SparseSplit", err)
return
}
- opspec := tf.OpSpec{
- Type: "MakeIterator",
- Input: []tf.Input{
- dataset, iterator,
- },
+ if output_shape, idx, err = makeOutputList(op, idx, "output_shape"); err != nil {
+ scope.UpdateErr("SparseSplit", err)
+ return
}
- return scope.AddOperation(opspec)
+ return output_indices, output_values, output_shape
}
-// Creates a dataset that emits the records from one or more TFRecord files.
+// Returns the truth value of (x < y) element-wise.
//
-// Arguments:
-// filenames: A scalar or vector containing the name(s) of the file(s) to be
-// read.
-// compression_type: A scalar containing either (i) the empty string (no
-// compression), (ii) "ZLIB", or (iii) "GZIP".
-// buffer_size: A scalar representing the number of bytes to buffer. A value of
-// 0 means no buffering will be performed.
-func TFRecordDataset(scope *Scope, filenames tf.Output, compression_type tf.Output, buffer_size tf.Output) (handle tf.Output) {
+// *NOTE*: `Less` supports broadcasting. More about broadcasting
+// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+func Less(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "TFRecordDataset",
+ Type: "Less",
Input: []tf.Input{
- filenames, compression_type, buffer_size,
+ x, y,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Concatenates quantized tensors along one dimension.
+// QuantizedReluXAttr is an optional argument to QuantizedReluX.
+type QuantizedReluXAttr func(optionalAttr)
+
+// QuantizedReluXOutType sets the optional out_type attribute to value.
+// If not specified, defaults to DT_QUINT8
+func QuantizedReluXOutType(value tf.DataType) QuantizedReluXAttr {
+ return func(m optionalAttr) {
+ m["out_type"] = value
+ }
+}
+
+// Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)`
//
// Arguments:
-// concat_dim: 0-D. The dimension along which to concatenate. Must be in the
-// range [0, rank(values)).
-// values: The `N` Tensors to concatenate. Their ranks and types must match,
-// and their sizes must match in all dimensions except `concat_dim`.
-// input_mins: The minimum scalar values for each of the input tensors.
-// input_maxes: The maximum scalar values for each of the input tensors.
//
-// Returns A `Tensor` with the concatenation of values stacked along the
-// `concat_dim` dimension. This tensor's shape matches that of `values` except
-// in `concat_dim` where it has the sum of the sizes.The float value that the minimum quantized output value represents.The float value that the maximum quantized output value represents.
-func QuantizedConcat(scope *Scope, concat_dim tf.Output, values []tf.Output, input_mins []tf.Output, input_maxes []tf.Output) (output tf.Output, output_min tf.Output, output_max tf.Output) {
+//
+// min_features: The float value that the lowest quantized value represents.
+// max_features: The float value that the highest quantized value represents.
+//
+// Returns activations with the same shape as "features", the float value that
+// the lowest quantized value represents, and the float value that the highest
+// quantized value represents.
+func QuantizedReluX(scope *Scope, features tf.Output, max_value tf.Output, min_features tf.Output, max_features tf.Output, optional ...QuantizedReluXAttr) (activations tf.Output, min_activations tf.Output, max_activations tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "QuantizedConcat",
+ Type: "QuantizedReluX",
Input: []tf.Input{
- concat_dim, tf.OutputList(values), tf.OutputList(input_mins), tf.OutputList(input_maxes),
+ features, max_value, min_features, max_features,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0), op.Output(1), op.Output(2)
}
-// Creates a dataset that emits the records from one or more binary files.
+// Applies softmax to a batched N-D `SparseTensor`.
+//
+// The inputs represent an N-D SparseTensor with logical shape `[..., B, C]`
+// (where `N >= 2`), and with indices sorted in the canonical lexicographic order.
+//
+// This op is equivalent to applying the normal `tf.nn.softmax()` to each innermost
+// logical submatrix with shape `[B, C]`, but with the catch that *the implicitly
+// zero elements do not participate*. Specifically, the algorithm is equivalent
+// to the following:
+//
+// (1) Applies `tf.nn.softmax()` to a densified view of each innermost submatrix
+// with shape `[B, C]`, along the size-C dimension;
+// (2) Masks out the original implicitly-zero locations;
+// (3) Renormalizes the remaining elements.
+//
+// Hence, the `SparseTensor` result has exactly the same non-zero indices and
+// shape.
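+//
+// For example (illustrative): a `[1, 3]` SparseTensor with values `[1.0, 1.0]`
+// at columns 0 and 2 densifies to `[1, 0, 1]`; softmax gives roughly
+// `[0.42, 0.16, 0.42]`, and masking the implicit zero then renormalizing
+// yields `[0.5, 0.5]` at the original two indices.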
//
// Arguments:
-// filenames: A scalar or a vector containing the name(s) of the file(s) to be
-// read.
-// header_bytes: A scalar representing the number of bytes to skip at the
-// beginning of a file.
-// record_bytes: A scalar representing the number of bytes in each record.
-// footer_bytes: A scalar representing the number of bytes to skip at the end
-// of a file.
-// buffer_size: A scalar representing the number of bytes to buffer. Must be > 0.
-func FixedLengthRecordDataset(scope *Scope, filenames tf.Output, header_bytes tf.Output, record_bytes tf.Output, footer_bytes tf.Output, buffer_size tf.Output) (handle tf.Output) {
+// sp_indices: 2-D. `NNZ x R` matrix with the indices of non-empty values in a
+// SparseTensor, in canonical ordering.
+// sp_values: 1-D. `NNZ` non-empty values corresponding to `sp_indices`.
+// sp_shape: 1-D. Shape of the input SparseTensor.
+//
+// Returns 1-D. The `NNZ` values for the result `SparseTensor`.
+func SparseSoftmax(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "FixedLengthRecordDataset",
+ Type: "SparseSoftmax",
Input: []tf.Input{
- filenames, header_bytes, record_bytes, footer_bytes, buffer_size,
+ sp_indices, sp_values, sp_shape,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Creates a dataset that executes a SQL query and emits rows of the result set.
-//
-// Arguments:
-// driver_name: The database type. Currently, the only supported type is 'sqlite'.
-// data_source_name: A connection string to connect to the database.
-// query: A SQL query to execute.
-//
+// RandomPoissonAttr is an optional argument to RandomPoisson.
+type RandomPoissonAttr func(optionalAttr)
+
+// RandomPoissonSeed sets the optional seed attribute to value.
+// If not specified, defaults to 0
+func RandomPoissonSeed(value int64) RandomPoissonAttr {
+ return func(m optionalAttr) {
+ m["seed"] = value
+ }
+}
+
+// RandomPoissonSeed2 sets the optional seed2 attribute to value.
+// If not specified, defaults to 0
+func RandomPoissonSeed2(value int64) RandomPoissonAttr {
+ return func(m optionalAttr) {
+ m["seed2"] = value
+ }
+}
+
+// Use RandomPoissonV2 instead.
//
-func SqlDataset(scope *Scope, driver_name tf.Output, data_source_name tf.Output, query tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
+// DEPRECATED at GraphDef version 25: Replaced by RandomPoissonV2
+func RandomPoisson(scope *Scope, shape tf.Output, rate tf.Output, optional ...RandomPoissonAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "SqlDataset",
+ Type: "RandomPoisson",
Input: []tf.Input{
- driver_name, data_source_name, query,
+ shape, rate,
},
Attrs: attrs,
}
@@ -5790,190 +5493,223 @@ func SqlDataset(scope *Scope, driver_name tf.Output, data_source_name tf.Output,
return op.Output(0)
}
-// PlaceholderAttr is an optional argument to Placeholder.
-type PlaceholderAttr func(optionalAttr)
+// MaxPoolGradV2Attr is an optional argument to MaxPoolGradV2.
+type MaxPoolGradV2Attr func(optionalAttr)
-// PlaceholderShape sets the optional shape attribute to value.
+// MaxPoolGradV2DataFormat sets the optional data_format attribute to value.
//
-// value: (Optional) The shape of the tensor. If the shape has 0 dimensions, the
-// shape is unconstrained.
-// If not specified, defaults to <unknown_rank:true >
-func PlaceholderShape(value tf.Shape) PlaceholderAttr {
+// value: Specify the data format of the input and output data. With the
+// default format "NHWC", the data is stored in the order of:
+// [batch, in_height, in_width, in_channels].
+// Alternatively, the format could be "NCHW", the data storage order of:
+// [batch, in_channels, in_height, in_width].
+// If not specified, defaults to "NHWC"
+func MaxPoolGradV2DataFormat(value string) MaxPoolGradV2Attr {
return func(m optionalAttr) {
- m["shape"] = value
+ m["data_format"] = value
}
}
-// A placeholder op for a value that will be fed into the computation.
-//
-// N.B. This operation will fail with an error if it is executed. It is
-// intended as a way to represent a value that will always be fed, and to
-// provide attrs that enable the fed value to be checked at runtime.
+// Computes gradients of the maxpooling function.
//
// Arguments:
-// dtype: The type of elements in the tensor.
+// orig_input: The original input tensor.
+// orig_output: The original output tensor.
+// grad: 4-D. Gradients w.r.t. the output of `max_pool`.
+// ksize: The size of the window for each dimension of the input tensor.
+// strides: The stride of the sliding window for each dimension of the
+// input tensor.
+// padding: The type of padding algorithm to use.
//
-// Returns A placeholder tensor that must be replaced using the feed mechanism.
-func Placeholder(scope *Scope, dtype tf.DataType, optional ...PlaceholderAttr) (output tf.Output) {
+// Returns Gradients w.r.t. the input to `max_pool`.
+func MaxPoolGradV2(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize tf.Output, strides tf.Output, padding string, optional ...MaxPoolGradV2Attr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dtype": dtype}
+ attrs := map[string]interface{}{"padding": padding}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "Placeholder",
-
+ Type: "MaxPoolGradV2",
+ Input: []tf.Input{
+ orig_input, orig_output, grad, ksize, strides,
+ },
Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Creates a dataset that caches elements from `input_dataset`.
+// Restore a reader to a previously saved state.
//
-// A CacheDataset will iterate over the input_dataset, and store tensors. If the
-// cache already exists, the cache will be used. If the cache is inappropriate
-// (e.g. cannot be opened, contains tensors of the wrong shape / size), an error
-// will the returned when used.
+// Not all Readers support being restored, so this can produce an
+// Unimplemented error.
//
// Arguments:
+// reader_handle: Handle to a Reader.
+// state: Result of a ReaderSerializeState of a Reader with type
+// matching reader_handle.
//
-// filename: A path on the filesystem where we should cache the dataset. Note: this
-// will be a directory.
-//
-//
-func CacheDataset(scope *Scope, input_dataset tf.Output, filename tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
+// Returns the created operation.
+func ReaderRestoreStateV2(scope *Scope, reader_handle tf.Output, state tf.Output) (o *tf.Operation) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
opspec := tf.OpSpec{
- Type: "CacheDataset",
+ Type: "ReaderRestoreStateV2",
Input: []tf.Input{
- input_dataset, filename,
+ reader_handle, state,
},
- Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
-// Creates a dataset that shuffles and repeats elements from `input_dataset`
+// ResourceSparseApplyFtrlV2Attr is an optional argument to ResourceSparseApplyFtrlV2.
+type ResourceSparseApplyFtrlV2Attr func(optionalAttr)
+
+// ResourceSparseApplyFtrlV2UseLocking sets the optional use_locking attribute to value.
//
-// pseudorandomly.
+// value: If `True`, updating of the var and accum tensors will be protected
+// by a lock; otherwise the behavior is undefined, but may exhibit less
+// contention.
+// If not specified, defaults to false
+func ResourceSparseApplyFtrlV2UseLocking(value bool) ResourceSparseApplyFtrlV2Attr {
+ return func(m optionalAttr) {
+ m["use_locking"] = value
+ }
+}
+
+// Update relevant entries in '*var' according to the Ftrl-proximal scheme.
//
-// Arguments:
+// That is for rows we have grad for, we update var, accum and linear as follows:
+// grad_with_shrinkage = grad + 2 * l2_shrinkage * var
+// accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
+// linear += grad_with_shrinkage +
+// (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
+// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
+// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
+// accum = accum_new
//
-// buffer_size: The number of output elements to buffer in an iterator over
-// this dataset. Compare with the `min_after_dequeue` attr when creating a
-// `RandomShuffleQueue`.
-// seed: A scalar seed for the random number generator. If either `seed` or
-// `seed2` is set to be non-zero, the random number generator is seeded
-// by the given seed. Otherwise, a random seed is used.
-// seed2: A second scalar seed to avoid seed collision.
-// count: A scalar representing the number of times the underlying dataset
-// should be repeated. The default is `-1`, which results in infinite repetition.
+// Arguments:
+// var_: Should be from a Variable().
+// accum: Should be from a Variable().
+// linear: Should be from a Variable().
+// grad: The gradient.
+// indices: A vector of indices into the first dimension of var and accum.
+// lr: Scaling factor. Must be a scalar.
+// l1: L1 regularization. Must be a scalar.
+// l2: L2 shrinkage regularization. Must be a scalar.
//
+// lr_power: Scaling factor. Must be a scalar.
//
-func ShuffleAndRepeatDataset(scope *Scope, input_dataset tf.Output, buffer_size tf.Output, seed tf.Output, seed2 tf.Output, count tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
+// Returns the created operation.
+func ResourceSparseApplyFtrlV2(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, indices tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, l2_shrinkage tf.Output, lr_power tf.Output, optional ...ResourceSparseApplyFtrlV2Attr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "ShuffleAndRepeatDataset",
+ Type: "ResourceSparseApplyFtrlV2",
Input: []tf.Input{
- input_dataset, buffer_size, seed, seed2, count,
+ var_, accum, linear, grad, indices, lr, l1, l2, l2_shrinkage, lr_power,
},
Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
-// Creates a Dataset that returns pseudorandom numbers.
-//
-// Arguments:
-// seed: A scalar seed for the random number generator. If either seed or
-// seed2 is set to be non-zero, the random number generator is seeded
-// by the given seed. Otherwise, a random seed is used.
-// seed2: A second scalar seed to avoid seed collision.
-//
+// Associates the given iterator with the given statistics aggregator.
//
-func RandomDataset(scope *Scope, seed tf.Output, seed2 tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
+// Returns the created operation.
+func IteratorSetStatsAggregator(scope *Scope, iterator_handle tf.Output, stats_aggregator_handle tf.Output) (o *tf.Operation) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
opspec := tf.OpSpec{
- Type: "RandomDataset",
+ Type: "IteratorSetStatsAggregator",
Input: []tf.Input{
- seed, seed2,
+ iterator_handle, stats_aggregator_handle,
},
- Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
-// Identity op for gradient debugging.
-//
-// This op is hidden from public in Python. It is used by TensorFlow Debugger to
-// register gradient tensors for gradient debugging.
-// This op operates on non-reference-type tensors.
-func DebugGradientIdentity(scope *Scope, input tf.Output) (output tf.Output) {
+// Returns element-wise smallest integer not less than x.
+func Ceil(scope *Scope, x tf.Output) (y tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "DebugGradientIdentity",
+ Type: "Ceil",
Input: []tf.Input{
- input,
+ x,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Deprecated. Use TensorArrayGradV3
-func TensorArrayGradV2(scope *Scope, handle tf.Output, flow_in tf.Output, source string) (grad_handle tf.Output) {
+// Computes the number of elements in the given table.
+//
+// Arguments:
+// table_handle: Handle to the table.
+//
+// Returns Scalar that contains number of elements in the table.
+func LookupTableSizeV2(scope *Scope, table_handle tf.Output) (size tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"source": source}
opspec := tf.OpSpec{
- Type: "TensorArrayGradV2",
+ Type: "LookupTableSizeV2",
Input: []tf.Input{
- handle, flow_in,
+ table_handle,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Creates a dataset that batches input elements into a SparseTensor.
+// ResizeBilinearGradAttr is an optional argument to ResizeBilinearGrad.
+type ResizeBilinearGradAttr func(optionalAttr)
+
+// ResizeBilinearGradAlignCorners sets the optional align_corners attribute to value.
//
-// Arguments:
-// input_dataset: A handle to an input dataset. Must have a single component.
-// batch_size: A scalar representing the number of elements to accumulate in a
-// batch.
-// row_shape: A vector representing the dense shape of each row in the produced
-// SparseTensor. The shape may be partially specified, using `-1` to indicate
-// that a particular dimension should use the maximum size of all batch elements.
+// value: If true, rescale grads by (orig_height - 1) / (height - 1), which
+// exactly aligns the 4 corners of grads and original_image. If false, rescale by
+// orig_height / height. The width dimension is treated similarly.
+// If not specified, defaults to false
+func ResizeBilinearGradAlignCorners(value bool) ResizeBilinearGradAttr {
+ return func(m optionalAttr) {
+ m["align_corners"] = value
+ }
+}
+
+// Computes the gradient of bilinear interpolation.
//
+// Arguments:
+// grads: 4-D with shape `[batch, height, width, channels]`.
+// original_image: 4-D with shape `[batch, orig_height, orig_width, channels]`,
+// The image tensor that was resized.
//
-func DenseToSparseBatchDataset(scope *Scope, input_dataset tf.Output, batch_size tf.Output, row_shape tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
+// Returns 4-D with shape `[batch, orig_height, orig_width, channels]`.
+// Gradients with respect to the input image. Input image must have been
+// float or double.
+func ResizeBilinearGrad(scope *Scope, grads tf.Output, original_image tf.Output, optional ...ResizeBilinearGradAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "DenseToSparseBatchDataset",
+ Type: "ResizeBilinearGrad",
Input: []tf.Input{
- input_dataset, batch_size, row_shape,
+ grads, original_image,
},
Attrs: attrs,
}
@@ -5981,156 +5717,303 @@ func DenseToSparseBatchDataset(scope *Scope, input_dataset tf.Output, batch_size
return op.Output(0)
}
-// Creates a dataset that batches and pads `batch_size` elements from the input.
+// Computes the sum along sparse segments of a tensor divided by the sqrt of N.
+//
+// N is the size of the segment being reduced.
+//
+// Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
+// segments.
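+//
+// For example (a hypothetical illustration):
+//
+// ```
+// # data is [1.0, 3.0, 5.0], indices is [0, 1, 2], segment_ids is [0, 0, 1]
+// # output ==> [(1.0 + 3.0) / sqrt(2), 5.0 / sqrt(1)]
+// ```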
//
// Arguments:
//
-// batch_size: A scalar representing the number of elements to accumulate in a
-// batch.
-// padded_shapes: A list of int64 tensors representing the desired padded shapes
-// of the corresponding output components. These shapes may be partially
-// specified, using `-1` to indicate that a particular dimension should be
-// padded to the maximum size of all batch elements.
-// padding_values: A list of scalars containing the padding value to use for
-// each of the outputs.
+// indices: A 1-D tensor. Has same rank as `segment_ids`.
+// segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
//
-func PaddedBatchDataset(scope *Scope, input_dataset tf.Output, batch_size tf.Output, padded_shapes []tf.Output, padding_values []tf.Output, output_shapes []tf.Shape) (handle tf.Output) {
+// Returns Has same shape as data, except for dimension 0 which
+// has size `k`, the number of segments.
+func SparseSegmentSqrtN(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"output_shapes": output_shapes}
opspec := tf.OpSpec{
- Type: "PaddedBatchDataset",
+ Type: "SparseSegmentSqrtN",
Input: []tf.Input{
- input_dataset, batch_size, tf.OutputList(padded_shapes), tf.OutputList(padding_values),
+ data, indices, segment_ids,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// TensorArrayConcatV2Attr is an optional argument to TensorArrayConcatV2.
-type TensorArrayConcatV2Attr func(optionalAttr)
+// StatelessTruncatedNormalAttr is an optional argument to StatelessTruncatedNormal.
+type StatelessTruncatedNormalAttr func(optionalAttr)
-// TensorArrayConcatV2ElementShapeExcept0 sets the optional element_shape_except0 attribute to value.
-// If not specified, defaults to <unknown_rank:true >
-func TensorArrayConcatV2ElementShapeExcept0(value tf.Shape) TensorArrayConcatV2Attr {
+// StatelessTruncatedNormalDtype sets the optional dtype attribute to value.
+//
+// value: The type of the output.
+// If not specified, defaults to DT_FLOAT
+func StatelessTruncatedNormalDtype(value tf.DataType) StatelessTruncatedNormalAttr {
return func(m optionalAttr) {
- m["element_shape_except0"] = value
+ m["dtype"] = value
}
}
-// Deprecated. Use TensorArrayConcatV3
-func TensorArrayConcatV2(scope *Scope, handle tf.Output, flow_in tf.Output, dtype tf.DataType, optional ...TensorArrayConcatV2Attr) (value tf.Output, lengths tf.Output) {
+// Outputs deterministic pseudorandom values from a truncated normal distribution.
+//
+// The generated values follow a normal distribution with mean 0 and standard
+// deviation 1, except that values whose magnitude is more than 2 standard
+// deviations from the mean are dropped and re-picked.
+//
+// The outputs are a deterministic function of `shape` and `seed`.
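+//
+// A minimal usage sketch (assuming a `Scope` named `s` and the `Const` op
+// generated elsewhere in this package):
+//
+// ```
+// shape := Const(s, []int32{2, 3})
+// seed := Const(s, []int64{1, 2})
+// out := StatelessTruncatedNormal(s, shape, seed)
+// ```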
+//
+// Arguments:
+// shape: The shape of the output tensor.
+// seed: 2 seeds (shape [2]).
+//
+// Returns Random values with specified shape.
+func StatelessTruncatedNormal(scope *Scope, shape tf.Output, seed tf.Output, optional ...StatelessTruncatedNormalAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dtype": dtype}
+ attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "TensorArrayConcatV2",
+ Type: "StatelessTruncatedNormal",
Input: []tf.Input{
- handle, flow_in,
+ shape, seed,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1)
+ return op.Output(0)
}
-// Converts the given variant tensor to an iterator and stores it in the given resource.
+// RestoreSliceAttr is an optional argument to RestoreSlice.
+type RestoreSliceAttr func(optionalAttr)
+
+// RestoreSlicePreferredShard sets the optional preferred_shard attribute to value.
+//
+// value: Index of file to open first if multiple files match
+// `file_pattern`. See the documentation for `Restore`.
+// If not specified, defaults to -1
+func RestoreSlicePreferredShard(value int64) RestoreSliceAttr {
+ return func(m optionalAttr) {
+ m["preferred_shard"] = value
+ }
+}
+
+// Restores a tensor from checkpoint files.
+//
+// This is like `Restore` except that the restored tensor can be listed as filling
+// only a slice of a larger tensor. `shape_and_slice` specifies the shape of the
+// larger tensor and the slice that the restored tensor covers.
+//
+// The `shape_and_slice` input has the same format as the
+// elements of the `shapes_and_slices` input of the `SaveSlices` op.
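+//
+// For example, a specification of `"4 5 0,2:-"` (a hypothetical value) would
+// describe a larger tensor of shape `[4, 5]` and restore the slice covering
+// rows 0 and 1 and all columns.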
//
// Arguments:
-// resource_handle: A handle to an iterator resource.
-// serialized: A variant tensor storing the state of the iterator contained in the
-// resource.
+// file_pattern: Must have a single element. The pattern of the files from
+// which we read the tensor.
+// tensor_name: Must have a single element. The name of the tensor to be
+// restored.
+// shape_and_slice: Scalar. The shapes and slice specifications to use when
+// restoring a tensor.
+// dt: The type of the tensor to be restored.
//
-// Returns the created operation.
-func DeserializeIterator(scope *Scope, resource_handle tf.Output, serialized tf.Output) (o *tf.Operation) {
+// Returns The restored tensor.
+func RestoreSlice(scope *Scope, file_pattern tf.Output, tensor_name tf.Output, shape_and_slice tf.Output, dt tf.DataType, optional ...RestoreSliceAttr) (tensor tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"dt": dt}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "DeserializeIterator",
+ Type: "RestoreSlice",
Input: []tf.Input{
- resource_handle, serialized,
+ file_pattern, tensor_name, shape_and_slice,
},
+ Attrs: attrs,
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// Records the latency of producing `input_dataset` elements in a StatsAggregator.
-func LatencyStatsDataset(scope *Scope, input_dataset tf.Output, tag tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
+// UniqueWithCountsAttr is an optional argument to UniqueWithCounts.
+type UniqueWithCountsAttr func(optionalAttr)
+
+// UniqueWithCountsOutIdx sets the optional out_idx attribute to value.
+// If not specified, defaults to DT_INT32
+func UniqueWithCountsOutIdx(value tf.DataType) UniqueWithCountsAttr {
+ return func(m optionalAttr) {
+ m["out_idx"] = value
+ }
+}
+
+// Finds unique elements in a 1-D tensor.
+//
+// This operation returns a tensor `y` containing all of the unique elements of `x`
+// sorted in the same order that they occur in `x`. This operation also returns a
+// tensor `idx` the same size as `x` that contains the index of each value of `x`
+// in the unique output `y`. Finally, it returns a third tensor `count` that
+// contains the count of each element of `y` in `x`. In other words:
+//
+// `y[idx[i]] = x[i] for i in [0, 1,...,len(x) - 1]`
+//
+// For example:
+//
+// ```
+// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
+// y, idx, count = unique_with_counts(x)
+// y ==> [1, 2, 4, 7, 8]
+// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
+// count ==> [2, 1, 3, 1, 2]
+// ```
+//
+// Arguments:
+// x: 1-D.
+//
+// Returns 1-D `y`, 1-D `idx`, and 1-D `count`.
+func UniqueWithCounts(scope *Scope, x tf.Output, optional ...UniqueWithCountsAttr) (y tf.Output, idx tf.Output, count tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "LatencyStatsDataset",
+ Type: "UniqueWithCounts",
Input: []tf.Input{
- input_dataset, tag,
+ x,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1), op.Output(2)
}
-// Concatenates tensors along one dimension.
+// StatelessRandomNormalAttr is an optional argument to StatelessRandomNormal.
+type StatelessRandomNormalAttr func(optionalAttr)
+
+// StatelessRandomNormalDtype sets the optional dtype attribute to value.
+//
+// value: The type of the output.
+// If not specified, defaults to DT_FLOAT
+func StatelessRandomNormalDtype(value tf.DataType) StatelessRandomNormalAttr {
+ return func(m optionalAttr) {
+ m["dtype"] = value
+ }
+}
+
+// Outputs deterministic pseudorandom values from a normal distribution.
+//
+// The generated values will have mean 0 and standard deviation 1.
+//
+// The outputs are a deterministic function of `shape` and `seed`.
//
// Arguments:
-// values: List of `N` Tensors to concatenate. Their ranks and types must match,
-// and their sizes must match in all dimensions except `concat_dim`.
-// axis: 0-D. The dimension along which to concatenate. Must be in the
-// range [-rank(values), rank(values)).
+// shape: The shape of the output tensor.
+// seed: 2 seeds (shape [2]).
//
-// Returns A `Tensor` with the concatenation of values stacked along the
-// `concat_dim` dimension. This tensor's shape matches that of `values` except
-// in `concat_dim` where it has the sum of the sizes.
-func ConcatV2(scope *Scope, values []tf.Output, axis tf.Output) (output tf.Output) {
+// Returns Random values with specified shape.
+func StatelessRandomNormal(scope *Scope, shape tf.Output, seed tf.Output, optional ...StatelessRandomNormalAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "ConcatV2",
+ Type: "StatelessRandomNormal",
Input: []tf.Input{
- tf.OutputList(values), axis,
+ shape, seed,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Creates a dataset that contains the elements of `input_dataset` ignoring errors.
-func IgnoreErrorsDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
+// Reshapes a quantized tensor as per the Reshape op.
+//
+// Arguments:
+//
+// shape: Defines the shape of the output tensor.
+// input_min: The minimum value of the input.
+// input_max: The maximum value of the input.
+//
+// Returns the reshaped tensor, along with `output_min` and `output_max`, which
+// are copied from `input_min` and `input_max` respectively.
+func QuantizedReshape(scope *Scope, tensor tf.Output, shape tf.Output, input_min tf.Output, input_max tf.Output) (output tf.Output, output_min tf.Output, output_max tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
opspec := tf.OpSpec{
- Type: "IgnoreErrorsDataset",
+ Type: "QuantizedReshape",
Input: []tf.Input{
- input_dataset,
+ tensor, shape, input_min, input_max,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1), op.Output(2)
}
-// Creates a dataset that concatenates `input_dataset` with `another_dataset`.
-func ConcatenateDataset(scope *Scope, input_dataset tf.Output, another_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
+// GatherAttr is an optional argument to Gather.
+type GatherAttr func(optionalAttr)
+
+// GatherValidateIndices sets the optional validate_indices attribute to value.
+// If not specified, defaults to true
+func GatherValidateIndices(value bool) GatherAttr {
+ return func(m optionalAttr) {
+ m["validate_indices"] = value
+ }
+}
+
+// Gather slices from `params` according to `indices`.
+//
+// `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
+// Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
+//
+// ```python
+// # Scalar indices
+// output[:, ..., :] = params[indices, :, ... :]
+//
+// # Vector indices
+// output[i, :, ..., :] = params[indices[i], :, ... :]
+//
+// # Higher rank indices
+// output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
+// ```
+//
+// If `indices` is a permutation and `len(indices) == params.shape[0]` then
+// this operation will permute `params` accordingly.
+//
+// `validate_indices`: DEPRECATED. If this operation is assigned to CPU, values in
+// `indices` are always validated to be within range. If assigned to GPU,
+// out-of-bound indices result in safe but unspecified behavior, which may include
+// raising an error.
+//
+// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+// <img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt>
+// </div>
+func Gather(scope *Scope, params tf.Output, indices tf.Output, optional ...GatherAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "ConcatenateDataset",
+ Type: "Gather",
Input: []tf.Input{
- input_dataset, another_dataset,
+ params, indices,
},
Attrs: attrs,
}
@@ -6138,164 +6021,166 @@ func ConcatenateDataset(scope *Scope, input_dataset tf.Output, another_dataset t
return op.Output(0)
}
-// Creates a dataset that splits a SparseTensor into elements row-wise.
-func SparseTensorSliceDataset(scope *Scope, indices tf.Output, values tf.Output, dense_shape tf.Output) (handle tf.Output) {
+// Returns the truth value of (x != y) element-wise.
+//
+// *NOTE*: `NotEqual` supports broadcasting. More about broadcasting
+// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+func NotEqual(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "SparseTensorSliceDataset",
+ Type: "NotEqual",
Input: []tf.Input{
- indices, values, dense_shape,
+ x, y,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Reshapes a tensor.
-//
-// Given `tensor`, this operation returns a tensor that has the same values
-// as `tensor` with shape `shape`.
-//
-// If one component of `shape` is the special value -1, the size of that dimension
-// is computed so that the total size remains constant. In particular, a `shape`
-// of `[-1]` flattens into 1-D. At most one component of `shape` can be -1.
-//
-// If `shape` is 1-D or higher, then the operation returns a tensor with shape
-// `shape` filled with the values of `tensor`. In this case, the number of elements
-// implied by `shape` must be the same as the number of elements in `tensor`.
-//
-// For example:
-//
-// ```
-// # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]
-// # tensor 't' has shape [9]
-// reshape(t, [3, 3]) ==> [[1, 2, 3],
-// [4, 5, 6],
-// [7, 8, 9]]
-//
-// # tensor 't' is [[[1, 1], [2, 2]],
-// # [[3, 3], [4, 4]]]
-// # tensor 't' has shape [2, 2, 2]
-// reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
-// [3, 3, 4, 4]]
-//
-// # tensor 't' is [[[1, 1, 1],
-// # [2, 2, 2]],
-// # [[3, 3, 3],
-// # [4, 4, 4]],
-// # [[5, 5, 5],
-// # [6, 6, 6]]]
-// # tensor 't' has shape [3, 2, 3]
-// # pass '[-1]' to flatten 't'
-// reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]
+// Inverse 3D real-valued fast Fourier transform.
//
-// # -1 can also be used to infer the shape
+// Computes the inverse 3-dimensional discrete Fourier transform of a real-valued
+// signal over the inner-most 3 dimensions of `input`.
//
-// # -1 is inferred to be 9:
-// reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
-// [4, 4, 4, 5, 5, 5, 6, 6, 6]]
-// # -1 is inferred to be 2:
-// reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
-// [4, 4, 4, 5, 5, 5, 6, 6, 6]]
-// # -1 is inferred to be 3:
-// reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1],
-// [2, 2, 2],
-// [3, 3, 3]],
-// [[4, 4, 4],
-// [5, 5, 5],
-// [6, 6, 6]]]
+// The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`:
+// The inner-most dimension contains the `fft_length / 2 + 1` unique components of
+// the DFT of a real-valued signal. If `fft_length` is not provided, it is computed
+// from the size of the inner-most 3 dimensions of `input`. If the FFT length used
+// to compute `input` is odd, it should be provided since it cannot be inferred
+// properly.
//
-// # tensor 't' is [7]
-// # shape `[]` reshapes to a scalar
-// reshape(t, []) ==> 7
-// ```
+// Along each axis `IRFFT3D` is computed on, if `fft_length` (or
+// `fft_length / 2 + 1` for the inner-most dimension) is smaller than the
+// corresponding dimension of `input`, the dimension is cropped. If it is larger,
+// the dimension is padded with zeros.
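+//
+// For example, an `input` of shape `[8, 8, 5]` with `fft_length = [8, 8, 8]`
+// (a hypothetical case) produces a `float32` output of shape `[8, 8, 8]`.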
//
// Arguments:
+// input: A complex64 tensor.
+// fft_length: An int32 tensor of shape [3]. The FFT length for each dimension.
//
-// shape: Defines the shape of the output tensor.
-func Reshape(scope *Scope, tensor tf.Output, shape tf.Output) (output tf.Output) {
+// Returns A float32 tensor of the same rank as `input`. The inner-most 3
+// dimensions of `input` are replaced with the `fft_length` samples of their
+// inverse 3D real Fourier transform.
+//
+// @compatibility(numpy)
+// Equivalent to np.irfftn with 3 dimensions.
+// @end_compatibility
+func IRFFT3D(scope *Scope, input tf.Output, fft_length tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Reshape",
+ Type: "IRFFT3D",
Input: []tf.Input{
- tensor, shape,
+ input, fft_length,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Greedily selects a subset of bounding boxes in descending order of score,
+// StringSplitAttr is an optional argument to StringSplit.
+type StringSplitAttr func(optionalAttr)
+
+// StringSplitSkipEmpty sets the optional skip_empty attribute to value.
//
-// pruning away boxes that have high intersection-over-union (IOU) overlap
-// with previously selected boxes. Bounding boxes are supplied as
-// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
-// diagonal pair of box corners and the coordinates can be provided as normalized
-// (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm
-// is agnostic to where the origin is in the coordinate system. Note that this
-// algorithm is invariant to orthogonal transformations and translations
-// of the coordinate system; thus translating or reflections of the coordinate
-// system result in the same boxes being selected by the algorithm.
+// value: A `bool`. If `True`, skip the empty strings from the result.
+// If not specified, defaults to true
+func StringSplitSkipEmpty(value bool) StringSplitAttr {
+ return func(m optionalAttr) {
+ m["skip_empty"] = value
+ }
+}
+
+// Split elements of `input` based on `delimiter` into a `SparseTensor`.
//
-// The output of this operation is a set of integers indexing into the input
-// collection of bounding boxes representing the selected boxes. The bounding
-// box coordinates corresponding to the selected indices can then be obtained
-// using the `tf.gather operation`. For example:
+// Let N be the size of source (typically N will be the batch size). Split each
+// element of `input` based on `delimiter` and return a `SparseTensor`
+// containing the split tokens. Empty tokens are ignored.
//
-// selected_indices = tf.image.non_max_suppression_v2(
-// boxes, scores, max_output_size, iou_threshold)
-// selected_boxes = tf.gather(boxes, selected_indices)
+// `delimiter` can be empty, or a string of split characters. If `delimiter` is an
+// empty string, each element of `input` is split into individual single-byte
+// character strings, including splitting of UTF-8 multibyte sequences. Otherwise
+// every character of `delimiter` is a potential split point.
+//
+// For example:
+// if N = 2, input[0] is 'hello world' and input[1] is 'a b c', then the output
+// will be
+//
+// indices = [0, 0;
+// 0, 1;
+// 1, 0;
+// 1, 1;
+// 1, 2]
+// shape = [2, 3]
+// values = ['hello', 'world', 'a', 'b', 'c']
//
// Arguments:
-// boxes: A 2-D float tensor of shape `[num_boxes, 4]`.
-// scores: A 1-D float tensor of shape `[num_boxes]` representing a single
-// score corresponding to each box (each row of boxes).
-// max_output_size: A scalar integer tensor representing the maximum number of
-// boxes to be selected by non max suppression.
-// iou_threshold: A 0-D float tensor representing the threshold for deciding whether
-// boxes overlap too much with respect to IOU.
+// input: 1-D. Strings to split.
+// delimiter: 0-D. Delimiter characters (bytes), or empty string.
//
-// Returns A 1-D integer tensor of shape `[M]` representing the selected
-// indices from the boxes tensor, where `M <= max_output_size`.
-func NonMaxSuppressionV2(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, iou_threshold tf.Output) (selected_indices tf.Output) {
+// Returns a dense matrix of int64 representing the indices of the sparse
+// tensor, a vector of strings corresponding to the split values, and a
+// length-2 vector of int64 representing the shape of the sparse tensor, where
+// the first value is N and the second value is the maximum number of tokens
+// in a single input entry.
+func StringSplit(scope *Scope, input tf.Output, delimiter tf.Output, optional ...StringSplitAttr) (indices tf.Output, values tf.Output, shape tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "NonMaxSuppressionV2",
+ Type: "StringSplit",
Input: []tf.Input{
- boxes, scores, max_output_size, iou_threshold,
+ input, delimiter,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1), op.Output(2)
}
-// StatsAggregatorHandleAttr is an optional argument to StatsAggregatorHandle.
-type StatsAggregatorHandleAttr func(optionalAttr)
-
-// StatsAggregatorHandleContainer sets the optional container attribute to value.
-// If not specified, defaults to ""
-func StatsAggregatorHandleContainer(value string) StatsAggregatorHandleAttr {
- return func(m optionalAttr) {
- m["container"] = value
- }
-}
+// WriteAudioSummaryAttr is an optional argument to WriteAudioSummary.
+type WriteAudioSummaryAttr func(optionalAttr)
-// StatsAggregatorHandleSharedName sets the optional shared_name attribute to value.
-// If not specified, defaults to ""
-func StatsAggregatorHandleSharedName(value string) StatsAggregatorHandleAttr {
+// WriteAudioSummaryMaxOutputs sets the optional max_outputs attribute to value.
+//
+// value: Max number of batch elements to generate audio for.
+// If not specified, defaults to 3
+//
+// REQUIRES: value >= 1
+func WriteAudioSummaryMaxOutputs(value int64) WriteAudioSummaryAttr {
return func(m optionalAttr) {
- m["shared_name"] = value
+ m["max_outputs"] = value
}
}
-// Creates a statistics manager resource.
-func StatsAggregatorHandle(scope *Scope, optional ...StatsAggregatorHandleAttr) (handle tf.Output) {
+// Writes a `Summary` protocol buffer with audio.
+//
+// The summary has up to `max_outputs` summary values containing audio. The
+// audio is built from `tensor` which must be 3-D with shape `[batch_size,
+// frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
+// assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.
+//
+// The `tag` argument is a scalar `Tensor` of type `string`. It is used to
+// build the `tag` of the summary values:
+//
+// * If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
+// * If `max_outputs` is greater than 1, the summary value tags are
+// generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
+//
+// Arguments:
+// writer: A handle to a summary writer.
+// step: The step to write the summary for.
+// tag: Scalar. Used to build the `tag` attribute of the summary values.
+// tensor: 2-D of shape `[batch_size, frames]`.
+// sample_rate: The sample rate of the signal in hertz.
+//
+// Returns the created operation.
+func WriteAudioSummary(scope *Scope, writer tf.Output, step tf.Output, tag tf.Output, tensor tf.Output, sample_rate tf.Output, optional ...WriteAudioSummaryAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
@@ -6304,49 +6189,42 @@ func StatsAggregatorHandle(scope *Scope, optional ...StatsAggregatorHandleAttr)
a(attrs)
}
opspec := tf.OpSpec{
- Type: "StatsAggregatorHandle",
-
+ Type: "WriteAudioSummary",
+ Input: []tf.Input{
+ writer, step, tag, tensor, sample_rate,
+ },
Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
-// CropAndResizeGradBoxesAttr is an optional argument to CropAndResizeGradBoxes.
-type CropAndResizeGradBoxesAttr func(optionalAttr)
+// ProdAttr is an optional argument to Prod.
+type ProdAttr func(optionalAttr)
-// CropAndResizeGradBoxesMethod sets the optional method attribute to value.
+// ProdKeepDims sets the optional keep_dims attribute to value.
//
-// value: A string specifying the interpolation method. Only 'bilinear' is
-// supported for now.
-// If not specified, defaults to "bilinear"
-func CropAndResizeGradBoxesMethod(value string) CropAndResizeGradBoxesAttr {
+// value: If true, retain reduced dimensions with length 1.
+// If not specified, defaults to false
+func ProdKeepDims(value bool) ProdAttr {
return func(m optionalAttr) {
- m["method"] = value
+ m["keep_dims"] = value
}
}
-// Computes the gradient of the crop_and_resize op wrt the input boxes tensor.
+// Computes the product of elements across dimensions of a tensor.
+//
+// Reduces `input` along the dimensions given in `axis`. Unless
+// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+// `axis`. If `keep_dims` is true, the reduced dimensions are
+// retained with length 1.
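+//
+// For example (a hypothetical illustration):
+//
+// ```
+// # 'input' is [[1, 2, 3], [4, 5, 6]]
+// # Prod(input, [1]) ==> [6, 120]
+// # Prod(input, [1]) with keep_dims=true ==> [[6], [120]]
+// ```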
//
// Arguments:
-// grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
-// image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
-// Both `image_height` and `image_width` need to be positive.
-// boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
-// specifies the coordinates of a box in the `box_ind[i]` image and is specified
-// in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
-// `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the
-// `[0, 1]` interval of normalized image height is mapped to
-// `[0, image_height - 1] in image height coordinates. We do allow y1 > y2, in
-// which case the sampled crop is an up-down flipped version of the original
-// image. The width dimension is treated similarly. Normalized coordinates
-// outside the `[0, 1]` range are allowed, in which case we use
-// `extrapolation_value` to extrapolate the input image values.
-// box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
-// The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
+// input: The tensor to reduce.
+// axis: The dimensions to reduce. Must be in the range
+// `[-rank(input), rank(input))`.
//
-// Returns A 2-D tensor of shape `[num_boxes, 4]`.
-func CropAndResizeGradBoxes(scope *Scope, grads tf.Output, image tf.Output, boxes tf.Output, box_ind tf.Output, optional ...CropAndResizeGradBoxesAttr) (output tf.Output) {
+// Returns The reduced tensor.
+func Prod(scope *Scope, input tf.Output, axis tf.Output, optional ...ProdAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
@@ -6355,9 +6233,9 @@ func CropAndResizeGradBoxes(scope *Scope, grads tf.Output, image tf.Output, boxe
a(attrs)
}
opspec := tf.OpSpec{
- Type: "CropAndResizeGradBoxes",
+ Type: "Prod",
Input: []tf.Input{
- grads, image, boxes, box_ind,
+ input, axis,
},
Attrs: attrs,
}
@@ -6365,48 +6243,44 @@ func CropAndResizeGradBoxes(scope *Scope, grads tf.Output, image tf.Output, boxe
return op.Output(0)
}
-// ShuffleDatasetAttr is an optional argument to ShuffleDataset.
-type ShuffleDatasetAttr func(optionalAttr)
+// ResizeBilinearAttr is an optional argument to ResizeBilinear.
+type ResizeBilinearAttr func(optionalAttr)
-// ShuffleDatasetReshuffleEachIteration sets the optional reshuffle_each_iteration attribute to value.
+// ResizeBilinearAlignCorners sets the optional align_corners attribute to value.
//
-// value: If true, each iterator over this dataset will be given
-// a different pseudorandomly generated seed, based on a sequence seeded by the
-// `seed` and `seed2` inputs. If false, each iterator will be given the same
-// seed, and repeated iteration over this dataset will yield the exact same
-// sequence of results.
-// If not specified, defaults to true
-func ShuffleDatasetReshuffleEachIteration(value bool) ShuffleDatasetAttr {
+// value: If true, rescale input by (new_height - 1) / (height - 1), which
+// exactly aligns the 4 corners of images and resized images. If false, rescale
+// by new_height / height. The width dimension is treated similarly.
+// If not specified, defaults to false
+func ResizeBilinearAlignCorners(value bool) ResizeBilinearAttr {
return func(m optionalAttr) {
- m["reshuffle_each_iteration"] = value
+ m["align_corners"] = value
}
}
-// Creates a dataset that shuffles elements from `input_dataset` pseudorandomly.
-//
-// Arguments:
+// Resize `images` to `size` using bilinear interpolation.
//
-// buffer_size: The number of output elements to buffer in an iterator over
-// this dataset. Compare with the `min_after_dequeue` attr when creating a
-// `RandomShuffleQueue`.
-// seed: A scalar seed for the random number generator. If either `seed` or
-// `seed2` is set to be non-zero, the random number generator is seeded
-// by the given seed. Otherwise, a random seed is used.
-// seed2: A second scalar seed to avoid seed collision.
+// Input images can be of different types but output images are always float.
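+//
+// A minimal usage sketch (assuming a `Scope` named `s`, an `images` output,
+// and the `Const` op generated elsewhere in this package):
+//
+// ```
+// size := Const(s, []int32{224, 224})
+// resized := ResizeBilinear(s, images, size, ResizeBilinearAlignCorners(true))
+// ```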
//
+// Arguments:
+// images: 4-D with shape `[batch, height, width, channels]`.
+// size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The
+// new size for the images.
//
-func ShuffleDataset(scope *Scope, input_dataset tf.Output, buffer_size tf.Output, seed tf.Output, seed2 tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ShuffleDatasetAttr) (handle tf.Output) {
+// Returns 4-D with shape
+// `[batch, new_height, new_width, channels]`.
+func ResizeBilinear(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeBilinearAttr) (resized_images tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
+ attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "ShuffleDataset",
+ Type: "ResizeBilinear",
Input: []tf.Input{
- input_dataset, buffer_size, seed, seed2,
+ images, size,
},
Attrs: attrs,
}
@@ -6414,151 +6288,189 @@ func ShuffleDataset(scope *Scope, input_dataset tf.Output, buffer_size tf.Output
return op.Output(0)
}
-// CropAndResizeGradImageAttr is an optional argument to CropAndResizeGradImage.
-type CropAndResizeGradImageAttr func(optionalAttr)
+// Computes softsign: `features / (abs(features) + 1)`.
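+//
+// For example, softsign(-1.0) = -0.5 and softsign(3.0) = 0.75.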
+func Softsign(scope *Scope, features tf.Output) (activations tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "Softsign",
+ Input: []tf.Input{
+ features,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
-// CropAndResizeGradImageMethod sets the optional method attribute to value.
+// GenerateVocabRemappingAttr is an optional argument to GenerateVocabRemapping.
+type GenerateVocabRemappingAttr func(optionalAttr)
+
+// GenerateVocabRemappingOldVocabSize sets the optional old_vocab_size attribute to value.
//
-// value: A string specifying the interpolation method. Only 'bilinear' is
-// supported for now.
-// If not specified, defaults to "bilinear"
-func CropAndResizeGradImageMethod(value string) CropAndResizeGradImageAttr {
+// value: Number of entries in the old vocab file to consider. If -1,
+// use the entire old vocabulary.
+// If not specified, defaults to -1
+//
+// REQUIRES: value >= -1
+func GenerateVocabRemappingOldVocabSize(value int64) GenerateVocabRemappingAttr {
return func(m optionalAttr) {
- m["method"] = value
+ m["old_vocab_size"] = value
}
}
-// Computes the gradient of the crop_and_resize op wrt the input image tensor.
+// Given a path to new and old vocabulary files, returns a remapping Tensor of
//
-// Arguments:
-// grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
-// boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
-// specifies the coordinates of a box in the `box_ind[i]` image and is specified
-// in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
-// `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the
-// `[0, 1]` interval of normalized image height is mapped to
-// `[0, image_height - 1] in image height coordinates. We do allow y1 > y2, in
-// which case the sampled crop is an up-down flipped version of the original
-// image. The width dimension is treated similarly. Normalized coordinates
-// outside the `[0, 1]` range are allowed, in which case we use
-// `extrapolation_value` to extrapolate the input image values.
-// box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
-// The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
-// image_size: A 1-D tensor with value `[batch, image_height, image_width, depth]`
-// containing the original image size. Both `image_height` and `image_width` need
-// to be positive.
+// length `num_new_vocab`, where `remapping[i]` contains the row number in the old
+// vocabulary that corresponds to row `i` in the new vocabulary (starting at line
+// `new_vocab_offset` and up to `num_new_vocab` entities), or `-1` if entry `i`
+// in the new vocabulary is not in the old vocabulary. The old vocabulary is
+// constrained to the first `old_vocab_size` entries if `old_vocab_size` is not the
+// default value of -1.
//
+// `new_vocab_offset` enables
+// use in the partitioned variable case, and should generally be set through
+// examining partitioning info. The format of the files should be a text file,
+// with each line containing a single entity within the vocabulary.
//
-// Returns A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
-func CropAndResizeGradImage(scope *Scope, grads tf.Output, boxes tf.Output, box_ind tf.Output, image_size tf.Output, T tf.DataType, optional ...CropAndResizeGradImageAttr) (output tf.Output) {
+// For example, with `new_vocab_file` a text file containing each of the following
+// elements on a single line: `[f0, f1, f2, f3]`, `old_vocab_file` containing `[f1, f0, f3]`, and
+// `num_new_vocab = 3, new_vocab_offset = 1`, the returned remapping would be
+// `[0, -1, 2]`.
+//
+// The op also returns a count of how many entries in the new vocabulary
+// were present in the old vocabulary, which is used to calculate the number of
+// values to initialize in a weight matrix remapping.
+//
+// This functionality can be used to remap both row vocabularies (typically,
+// features) and column vocabularies (typically, classes) from TensorFlow
+// checkpoints. Note that the partitioning logic relies on contiguous vocabularies
+// corresponding to div-partitioned variables. Moreover, the underlying remapping
+// uses an IndexTable (as opposed to an inexact CuckooTable), so client code should
+// use the corresponding index_table_from_file() as the FeatureColumn framework
+// does (as opposed to tf.feature_to_id(), which uses a CuckooTable).
+//
+// Arguments:
+// new_vocab_file: Path to the new vocab file.
+// old_vocab_file: Path to the old vocab file.
+// new_vocab_offset: How many entries into the new vocab file to start reading.
+// num_new_vocab: Number of entries in the new vocab file to remap.
+//
+// Returns A Tensor of length num_new_vocab where the element at index i
+// is equal to the old ID that maps to the new ID i. This element is -1 for any
+// new ID that is not found in the old vocabulary, along with the number of new
+// vocab entries found in the old vocab.
+func GenerateVocabRemapping(scope *Scope, new_vocab_file tf.Output, old_vocab_file tf.Output, new_vocab_offset int64, num_new_vocab int64, optional ...GenerateVocabRemappingAttr) (remapping tf.Output, num_present tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"T": T}
+ attrs := map[string]interface{}{"new_vocab_offset": new_vocab_offset, "num_new_vocab": num_new_vocab}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "CropAndResizeGradImage",
+ Type: "GenerateVocabRemapping",
Input: []tf.Input{
- grads, boxes, box_ind, image_size,
+ new_vocab_file, old_vocab_file,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1)
}
-// A container for an iterator resource.
+// Assigns sparse updates to the variable referenced by `resource`.
//
-// Returns A handle to the iterator that can be passed to a "MakeIterator"
-// or "IteratorGetNext" op.
-func Iterator(scope *Scope, shared_name string, container string, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
+// This operation computes
+//
+// # Scalar indices
+// ref[indices, ...] = updates[...]
+//
+// # Vector indices (for each i)
+// ref[indices[i], ...] = updates[i, ...]
+//
+// # High rank indices (for each i, ..., j)
+// ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
+//
+// Arguments:
+// resource: Should be from a `Variable` node.
+// indices: A tensor of indices into the first dimension of `ref`.
+// updates: A tensor of updated values to add to `ref`.
+//
+// Returns the created operation.
+func ResourceScatterUpdate(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"shared_name": shared_name, "container": container, "output_types": output_types, "output_shapes": output_shapes}
opspec := tf.OpSpec{
- Type: "Iterator",
-
- Attrs: attrs,
+ Type: "ResourceScatterUpdate",
+ Input: []tf.Input{
+ resource, indices, updates,
+ },
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
-// ExtractGlimpseAttr is an optional argument to ExtractGlimpse.
-type ExtractGlimpseAttr func(optionalAttr)
+// CumsumAttr is an optional argument to Cumsum.
+type CumsumAttr func(optionalAttr)
-// ExtractGlimpseCentered sets the optional centered attribute to value.
+// CumsumExclusive sets the optional exclusive attribute to value.
//
-// value: indicates if the offset coordinates are centered relative to
-// the image, in which case the (0, 0) offset is relative to the center
-// of the input images. If false, the (0,0) offset corresponds to the
-// upper left corner of the input images.
-// If not specified, defaults to true
-func ExtractGlimpseCentered(value bool) ExtractGlimpseAttr {
+// value: If `True`, perform exclusive cumsum.
+// If not specified, defaults to false
+func CumsumExclusive(value bool) CumsumAttr {
return func(m optionalAttr) {
- m["centered"] = value
+ m["exclusive"] = value
}
}
-// ExtractGlimpseNormalized sets the optional normalized attribute to value.
+// CumsumReverse sets the optional reverse attribute to value.
//
-// value: indicates if the offset coordinates are normalized.
-// If not specified, defaults to true
-func ExtractGlimpseNormalized(value bool) ExtractGlimpseAttr {
+// value: A `bool` (default: False).
+// If not specified, defaults to false
+func CumsumReverse(value bool) CumsumAttr {
return func(m optionalAttr) {
- m["normalized"] = value
+ m["reverse"] = value
}
}
-// ExtractGlimpseUniformNoise sets the optional uniform_noise attribute to value.
+// Compute the cumulative sum of the tensor `x` along `axis`.
//
-// value: indicates if the noise should be generated using a
-// uniform distribution or a Gaussian distribution.
-// If not specified, defaults to true
-func ExtractGlimpseUniformNoise(value bool) ExtractGlimpseAttr {
- return func(m optionalAttr) {
- m["uniform_noise"] = value
- }
-}
-
-// Extracts a glimpse from the input tensor.
+// By default, this op performs an inclusive cumsum, which means that the first
+// element of the input is identical to the first element of the output:
//
-// Returns a set of windows called glimpses extracted at location
-// `offsets` from the input tensor. If the windows only partially
-// overlaps the inputs, the non overlapping areas will be filled with
-// random noise.
+// ```python
+// tf.cumsum([a, b, c]) # => [a, a + b, a + b + c]
+// ```
//
-// The result is a 4-D tensor of shape `[batch_size, glimpse_height,
-// glimpse_width, channels]`. The channels and batch dimensions are the
-// same as that of the input tensor. The height and width of the output
-// windows are specified in the `size` parameter.
+// By setting the `exclusive` kwarg to `True`, an exclusive cumsum is
+// performed instead:
//
-// The argument `normalized` and `centered` controls how the windows are built:
+// ```python
+// tf.cumsum([a, b, c], exclusive=True) # => [0, a, a + b]
+// ```
//
-// * If the coordinates are normalized but not centered, 0.0 and 1.0
-// correspond to the minimum and maximum of each height and width
-// dimension.
-// * If the coordinates are both normalized and centered, they range from
-// -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper
-// left corner, the lower right corner is located at (1.0, 1.0) and the
-// center is at (0, 0).
-// * If the coordinates are not normalized they are interpreted as
-// numbers of pixels.
+// By setting the `reverse` kwarg to `True`, the cumsum is performed in the
+// opposite direction:
//
-// Arguments:
-// input: A 4-D float tensor of shape `[batch_size, height, width, channels]`.
-// size: A 1-D tensor of 2 elements containing the size of the glimpses
-// to extract. The glimpse height must be specified first, following
-// by the glimpse width.
-// offsets: A 2-D integer tensor of shape `[batch_size, 2]` containing
-// the y, x locations of the center of each window.
+// ```python
+// tf.cumsum([a, b, c], reverse=True) # => [a + b + c, b + c, c]
+// ```
//
-// Returns A tensor representing the glimpses `[batch_size,
-// glimpse_height, glimpse_width, channels]`.
-func ExtractGlimpse(scope *Scope, input tf.Output, size tf.Output, offsets tf.Output, optional ...ExtractGlimpseAttr) (glimpse tf.Output) {
+// This is more efficient than using separate `tf.reverse` ops.
+//
+// The `reverse` and `exclusive` kwargs can also be combined:
+//
+// ```python
+// tf.cumsum([a, b, c], exclusive=True, reverse=True) # => [b + c, c, 0]
+// ```
+//
+// Arguments:
+// x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
+// `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
+// `complex128`, `qint8`, `quint8`, `qint32`, `half`.
+// axis: A `Tensor` of type `int32` (default: 0). Must be in the range
+// `[-rank(x), rank(x))`.
+func Cumsum(scope *Scope, x tf.Output, axis tf.Output, optional ...CumsumAttr) (out tf.Output) {
if scope.Err() != nil {
return
}
@@ -6567,9 +6479,9 @@ func ExtractGlimpse(scope *Scope, input tf.Output, size tf.Output, offsets tf.Ou
a(attrs)
}
opspec := tf.OpSpec{
- Type: "ExtractGlimpse",
+ Type: "Cumsum",
Input: []tf.Input{
- input, size, offsets,
+ x, axis,
},
Attrs: attrs,
}
@@ -6577,323 +6489,380 @@ func ExtractGlimpse(scope *Scope, input tf.Output, size tf.Output, offsets tf.Ou
return op.Output(0)
}
-// SampleDistortedBoundingBoxV2Attr is an optional argument to SampleDistortedBoundingBoxV2.
-type SampleDistortedBoundingBoxV2Attr func(optionalAttr)
+// QuantizedRelu6Attr is an optional argument to QuantizedRelu6.
+type QuantizedRelu6Attr func(optionalAttr)
-// SampleDistortedBoundingBoxV2Seed sets the optional seed attribute to value.
+// QuantizedRelu6OutType sets the optional out_type attribute to value.
+// If not specified, defaults to DT_QUINT8
+func QuantizedRelu6OutType(value tf.DataType) QuantizedRelu6Attr {
+ return func(m optionalAttr) {
+ m["out_type"] = value
+ }
+}
+
+// Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)`
//
-// value: If either `seed` or `seed2` are set to non-zero, the random number
-// generator is seeded by the given `seed`. Otherwise, it is seeded by a random
-// seed.
+// Arguments:
+//
+// min_features: The float value that the lowest quantized value represents.
+// max_features: The float value that the highest quantized value represents.
+//
+// Returns activations with the same output shape as "features", along with the
+// float values that the lowest and highest quantized values represent.
+func QuantizedRelu6(scope *Scope, features tf.Output, min_features tf.Output, max_features tf.Output, optional ...QuantizedRelu6Attr) (activations tf.Output, min_activations tf.Output, max_activations tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
+ opspec := tf.OpSpec{
+ Type: "QuantizedRelu6",
+ Input: []tf.Input{
+ features, min_features, max_features,
+ },
+ Attrs: attrs,
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0), op.Output(1), op.Output(2)
+}
+
+// FixedLengthRecordReaderV2Attr is an optional argument to FixedLengthRecordReaderV2.
+type FixedLengthRecordReaderV2Attr func(optionalAttr)
+
+// FixedLengthRecordReaderV2HeaderBytes sets the optional header_bytes attribute to value.
+//
+// value: Number of bytes in the header, defaults to 0.
// If not specified, defaults to 0
-func SampleDistortedBoundingBoxV2Seed(value int64) SampleDistortedBoundingBoxV2Attr {
+func FixedLengthRecordReaderV2HeaderBytes(value int64) FixedLengthRecordReaderV2Attr {
return func(m optionalAttr) {
- m["seed"] = value
+ m["header_bytes"] = value
}
}
-// SampleDistortedBoundingBoxV2Seed2 sets the optional seed2 attribute to value.
+// FixedLengthRecordReaderV2FooterBytes sets the optional footer_bytes attribute to value.
//
-// value: A second seed to avoid seed collision.
+// value: Number of bytes in the footer, defaults to 0.
// If not specified, defaults to 0
-func SampleDistortedBoundingBoxV2Seed2(value int64) SampleDistortedBoundingBoxV2Attr {
+func FixedLengthRecordReaderV2FooterBytes(value int64) FixedLengthRecordReaderV2Attr {
return func(m optionalAttr) {
- m["seed2"] = value
+ m["footer_bytes"] = value
}
}
-// SampleDistortedBoundingBoxV2AspectRatioRange sets the optional aspect_ratio_range attribute to value.
+// FixedLengthRecordReaderV2HopBytes sets the optional hop_bytes attribute to value.
//
-// value: The cropped area of the image must have an aspect ratio =
-// width / height within this range.
-// If not specified, defaults to <f:0.75 f:1.33 >
-func SampleDistortedBoundingBoxV2AspectRatioRange(value []float32) SampleDistortedBoundingBoxV2Attr {
+// value: Number of bytes to hop before each read. Default of 0 means using
+// record_bytes.
+// If not specified, defaults to 0
+func FixedLengthRecordReaderV2HopBytes(value int64) FixedLengthRecordReaderV2Attr {
return func(m optionalAttr) {
- m["aspect_ratio_range"] = value
+ m["hop_bytes"] = value
}
}
-// SampleDistortedBoundingBoxV2AreaRange sets the optional area_range attribute to value.
+// FixedLengthRecordReaderV2Container sets the optional container attribute to value.
//
-// value: The cropped area of the image must contain a fraction of the
-// supplied image within in this range.
-// If not specified, defaults to <f:0.05 f:1 >
-func SampleDistortedBoundingBoxV2AreaRange(value []float32) SampleDistortedBoundingBoxV2Attr {
+// value: If non-empty, this reader is placed in the given container.
+// Otherwise, a default container is used.
+// If not specified, defaults to ""
+func FixedLengthRecordReaderV2Container(value string) FixedLengthRecordReaderV2Attr {
return func(m optionalAttr) {
- m["area_range"] = value
+ m["container"] = value
}
}
-// SampleDistortedBoundingBoxV2MaxAttempts sets the optional max_attempts attribute to value.
+// FixedLengthRecordReaderV2SharedName sets the optional shared_name attribute to value.
//
-// value: Number of attempts at generating a cropped region of the image
-// of the specified constraints. After `max_attempts` failures, return the entire
-// image.
-// If not specified, defaults to 100
-func SampleDistortedBoundingBoxV2MaxAttempts(value int64) SampleDistortedBoundingBoxV2Attr {
+// value: If non-empty, this reader is named in the given bucket
+// with this shared_name. Otherwise, the node name is used instead.
+// If not specified, defaults to ""
+func FixedLengthRecordReaderV2SharedName(value string) FixedLengthRecordReaderV2Attr {
return func(m optionalAttr) {
- m["max_attempts"] = value
+ m["shared_name"] = value
}
}
-// SampleDistortedBoundingBoxV2UseImageIfNoBoundingBoxes sets the optional use_image_if_no_bounding_boxes attribute to value.
+// FixedLengthRecordReaderV2Encoding sets the optional encoding attribute to value.
//
-// value: Controls behavior if no bounding boxes supplied.
-// If true, assume an implicit bounding box covering the whole input. If false,
-// raise an error.
-// If not specified, defaults to false
-func SampleDistortedBoundingBoxV2UseImageIfNoBoundingBoxes(value bool) SampleDistortedBoundingBoxV2Attr {
+// value: The type of encoding for the file. Currently ZLIB and GZIP
+// are supported. Defaults to none.
+// If not specified, defaults to ""
+func FixedLengthRecordReaderV2Encoding(value string) FixedLengthRecordReaderV2Attr {
return func(m optionalAttr) {
- m["use_image_if_no_bounding_boxes"] = value
+ m["encoding"] = value
}
}
-// Generate a single randomly distorted bounding box for an image.
-//
-// Bounding box annotations are often supplied in addition to ground-truth labels
-// in image recognition or object localization tasks. A common technique for
-// training such a system is to randomly distort an image while preserving
-// its content, i.e. *data augmentation*. This Op outputs a randomly distorted
-// localization of an object, i.e. bounding box, given an `image_size`,
-// `bounding_boxes` and a series of constraints.
-//
-// The output of this Op is a single bounding box that may be used to crop the
-// original image. The output is returned as 3 tensors: `begin`, `size` and
-// `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the
-// image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize
-// what the bounding box looks like.
-//
-// Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The
-// bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
-// height of the underlying image.
-//
-// For example,
-//
-// ```python
-// # Generate a single distorted bounding box.
-// begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
-// tf.shape(image),
-// bounding_boxes=bounding_boxes)
-//
-// # Draw the bounding box in an image summary.
-// image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
-// bbox_for_draw)
-// tf.summary.image('images_with_box', image_with_box)
-//
-// # Employ the bounding box to distort the image.
-// distorted_image = tf.slice(image, begin, size)
-// ```
-//
-// Note that if no bounding box information is available, setting
-// `use_image_if_no_bounding_boxes = true` will assume there is a single implicit
-// bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is
-// false and no bounding boxes are supplied, an error is raised.
+// A Reader that outputs fixed-length records from a file.
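+//
+// A minimal usage sketch (a hypothetical example, assuming a `Scope` named
+// `s`; the record and header sizes depend on the file being read):
+//
+// ```
+// reader := FixedLengthRecordReaderV2(s, 16,
+//     FixedLengthRecordReaderV2HeaderBytes(4))
+// ```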
//
// Arguments:
-// image_size: 1-D, containing `[height, width, channels]`.
-// bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes
-// associated with the image.
-// min_object_covered: The cropped area of the image must contain at least this
-// fraction of any bounding box supplied. The value of this parameter should be
-// non-negative. In the case of 0, the cropped area does not need to overlap
-// any of the bounding boxes supplied.
+// record_bytes: Number of bytes in the record.
//
-// Returns 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to
-// `tf.slice`.1-D, containing `[target_height, target_width, -1]`. Provide as input to
-// `tf.slice`.3-D with shape `[1, 1, 4]` containing the distorted bounding box.
-// Provide as input to `tf.image.draw_bounding_boxes`.
-func SampleDistortedBoundingBoxV2(scope *Scope, image_size tf.Output, bounding_boxes tf.Output, min_object_covered tf.Output, optional ...SampleDistortedBoundingBoxV2Attr) (begin tf.Output, size tf.Output, bboxes tf.Output) {
+// Returns The handle to reference the Reader.
+func FixedLengthRecordReaderV2(scope *Scope, record_bytes int64, optional ...FixedLengthRecordReaderV2Attr) (reader_handle tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"record_bytes": record_bytes}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "SampleDistortedBoundingBoxV2",
- Input: []tf.Input{
- image_size, bounding_boxes, min_object_covered,
- },
+ Type: "FixedLengthRecordReaderV2",
+
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
+ return op.Output(0)
}
-// Draw bounding boxes on a batch of images.
-//
-// Outputs a copy of `images` but draws on top of the pixels zero or more bounding
-// boxes specified by the locations in `boxes`. The coordinates of the each
-// bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The
-// bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
-// height of the underlying image.
-//
-// For example, if an image is 100 x 200 pixels (height x width) and the bounding
-// box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of
-// the bounding box will be `(40, 10)` to `(100, 50)` (in (x,y) coordinates).
+// The gradient operator for the SparseAdd op.
//
-// Parts of the bounding box may fall outside the image.
+// The SparseAdd op calculates A + B, where A, B, and the sum are all represented
+// as `SparseTensor` objects. This op takes in the upstream gradient w.r.t.
+// non-empty values of the sum, and outputs the gradients w.r.t. the non-empty
+// values of A and B.
//
// Arguments:
-// images: 4-D with shape `[batch, height, width, depth]`. A batch of images.
-// boxes: 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding
-// boxes.
+// backprop_val_grad: 1-D with shape `[nnz(sum)]`. The gradient with respect to
+// the non-empty values of the sum.
+// a_indices: 2-D. The `indices` of the `SparseTensor` A, size `[nnz(A), ndims]`.
+// b_indices: 2-D. The `indices` of the `SparseTensor` B, size `[nnz(B), ndims]`.
+// sum_indices: 2-D. The `indices` of the sum `SparseTensor`, size
+// `[nnz(sum), ndims]`.
//
-// Returns 4-D with the same shape as `images`. The batch of input images with
-// bounding boxes drawn on the images.
-func DrawBoundingBoxes(scope *Scope, images tf.Output, boxes tf.Output) (output tf.Output) {
+// Returns 1-D with shape `[nnz(A)]`, the gradient with respect to the
+// non-empty values of A, and 1-D with shape `[nnz(B)]`, the gradient with
+// respect to the non-empty values of B.
+func SparseAddGrad(scope *Scope, backprop_val_grad tf.Output, a_indices tf.Output, b_indices tf.Output, sum_indices tf.Output) (a_val_grad tf.Output, b_val_grad tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "DrawBoundingBoxes",
+ Type: "SparseAddGrad",
Input: []tf.Input{
- images, boxes,
+ backprop_val_grad, a_indices, b_indices, sum_indices,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0), op.Output(1)
+}
+
+// Computes atan of x element-wise.
+func Atan(scope *Scope, x tf.Output) (y tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "Atan",
+ Input: []tf.Input{
+ x,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Convert one or more images from HSV to RGB.
+// Encode audio data using the WAV file format.
//
-// Outputs a tensor of the same shape as the `images` tensor, containing the RGB
-// value of the pixels. The output is only well defined if the value in `images`
-// are in `[0,1]`.
+// This operation will generate a string suitable to be saved out to create a .wav
+// audio file. It will be encoded in the 16-bit PCM format. It takes in float
+// values in the range -1.0f to 1.0f, and any values outside that range will be
+// clamped to it.
//
-// See `rgb_to_hsv` for a description of the HSV encoding.
+// `audio` is a 2-D float Tensor of shape `[length, channels]`.
+// `sample_rate` is a scalar Tensor holding the rate to use (e.g. 44100).
//
// Arguments:
-// images: 1-D or higher rank. HSV data to convert. Last dimension must be size 3.
+// audio: 2-D with shape `[length, channels]`.
+// sample_rate: Scalar containing the sample frequency.
//
-// Returns `images` converted to RGB.
-func HSVToRGB(scope *Scope, images tf.Output) (output tf.Output) {
+// Returns 0-D. WAV-encoded file contents.
+func EncodeWav(scope *Scope, audio tf.Output, sample_rate tf.Output) (contents tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "HSVToRGB",
+ Type: "EncodeWav",
Input: []tf.Input{
- images,
+ audio, sample_rate,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Returns a list of tensors with the same shapes and contents as the input
+// Converts each string in the input Tensor to its hash modulo a number of buckets.
//
-// tensors.
+// The hash function is deterministic on the content of the string within the
+// process. The hash function is a keyed hash function, where attribute `key`
+// defines the key of the hash function. `key` is an array of 2 elements.
//
-// This op can be used to override the gradient for complicated functions. For
-// example, suppose y = f(x) and we wish to apply a custom function g for backprop
-// such that dx = g(dy). In Python,
+// A strong hash is important when inputs may be malicious, e.g. URLs with
+// additional components. Adversaries could try to make their inputs hash to the
+// same bucket for a denial-of-service attack or to skew the results. A strong
+// hash prevents this by making it difficult, if not infeasible, to compute inputs
+// that hash to the same bucket. This comes at a cost of roughly 4x higher compute
+// time than `tf.string_to_hash_bucket_fast`.
//
-// ```python
-// with tf.get_default_graph().gradient_override_map(
-// {'IdentityN': 'OverrideGradientWithG'}):
-// y, _ = identity_n([f(x), x])
+// Arguments:
+// input: The strings to assign a hash bucket.
+// num_buckets: The number of buckets.
+// key: The key for the keyed hash function passed as a list of two uint64
+// elements.
//
-// @tf.RegisterGradient('OverrideGradientWithG')
-// def ApplyG(op, dy, _):
-// return [None, g(dy)] # Do not backprop to f(x).
-// ```
-func IdentityN(scope *Scope, input []tf.Output) (output []tf.Output) {
+// Returns A Tensor of the same shape as the input `string_tensor`.
+func StringToHashBucketStrong(scope *Scope, input tf.Output, num_buckets int64, key []int64) (output tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"num_buckets": num_buckets, "key": key}
opspec := tf.OpSpec{
- Type: "IdentityN",
+ Type: "StringToHashBucketStrong",
Input: []tf.Input{
- tf.OutputList(input),
+ input,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
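+
+// Illustrative usage sketch (editorial, not part of the generated file): hashing
+// a small batch of strings into 1024 buckets with an assumed 128-bit key, using
+// the standard `op` and `tf` packages from tensorflow/go.
+//
+// ```
+// s := op.NewScope()
+// words := op.Const(s, []string{"lorem", "ipsum"})
+// buckets := op.StringToHashBucketStrong(s, words, 1024, []int64{1, 2})
+// // buckets holds one bucket id in [0, 1024) per input string.
+// ```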
+
+// Generates values in an interval.
+//
+// A sequence of `num` evenly-spaced values are generated beginning at `start`.
+// If `num > 1`, the values in the sequence increase by `(stop - start) / (num - 1)`,
+// so that the last one is exactly `stop`.
+//
+// For example:
+//
+// ```
+// tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0 11.0 12.0]
+// ```
+//
+// Arguments:
+// start: First entry in the range.
+// stop: Last entry in the range.
+// num: Number of values to generate.
+//
+// Returns 1-D. The generated values.
+func LinSpace(scope *Scope, start tf.Output, stop tf.Output, num tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
- var idx int
- var err error
- if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
- scope.UpdateErr("IdentityN", err)
- return
+ opspec := tf.OpSpec{
+ Type: "LinSpace",
+ Input: []tf.Input{
+ start, stop, num,
+ },
}
- return output
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
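+
+// Illustrative usage sketch (editorial): reproducing the example above from Go;
+// the scope and constant helpers are the standard ones from this package.
+//
+// ```
+// s := op.NewScope()
+// out := op.LinSpace(s, op.Const(s, float32(10)), op.Const(s, float32(12)), op.Const(s, int32(3)))
+// // Running the graph yields [10, 11, 12].
+// ```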
-// Decode the first frame of a GIF-encoded image to a uint8 tensor.
-//
-// GIF with frame or transparency compression are not supported
-// convert animated GIF from compressed to uncompressed by:
+// DestroyResourceOpAttr is an optional argument to DestroyResourceOp.
+type DestroyResourceOpAttr func(optionalAttr)
+
+// DestroyResourceOpIgnoreLookupError sets the optional ignore_lookup_error attribute to value.
//
-// convert $src.gif -coalesce $dst.gif
+// value: whether to ignore the error when the resource
+// doesn't exist.
+// If not specified, defaults to true
+func DestroyResourceOpIgnoreLookupError(value bool) DestroyResourceOpAttr {
+ return func(m optionalAttr) {
+ m["ignore_lookup_error"] = value
+ }
+}
+
+// Deletes the resource specified by the handle.
//
-// This op also supports decoding JPEGs and PNGs, though it is cleaner to use
-// `tf.image.decode_image`.
+// All subsequent operations using the resource will result in a NotFound
+// error status.
//
// Arguments:
-// contents: 0-D. The GIF-encoded image.
+// resource: handle to the resource to delete.
//
-// Returns 4-D with shape `[num_frames, height, width, 3]`. RGB order
-func DecodeGif(scope *Scope, contents tf.Output) (image tf.Output) {
+// Returns the created operation.
+func DestroyResourceOp(scope *Scope, resource tf.Output, optional ...DestroyResourceOpAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "DecodeGif",
+ Type: "DestroyResourceOp",
Input: []tf.Input{
- contents,
+ resource,
},
+ Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
-// DecodePngAttr is an optional argument to DecodePng.
-type DecodePngAttr func(optionalAttr)
+// CumprodAttr is an optional argument to Cumprod.
+type CumprodAttr func(optionalAttr)
-// DecodePngChannels sets the optional channels attribute to value.
+// CumprodExclusive sets the optional exclusive attribute to value.
//
-// value: Number of color channels for the decoded image.
-// If not specified, defaults to 0
-func DecodePngChannels(value int64) DecodePngAttr {
+// value: If `True`, perform exclusive cumprod.
+// If not specified, defaults to false
+func CumprodExclusive(value bool) CumprodAttr {
return func(m optionalAttr) {
- m["channels"] = value
+ m["exclusive"] = value
}
}
-// DecodePngDtype sets the optional dtype attribute to value.
-// If not specified, defaults to DT_UINT8
-func DecodePngDtype(value tf.DataType) DecodePngAttr {
+// CumprodReverse sets the optional reverse attribute to value.
+//
+// value: A `bool` (default: False).
+// If not specified, defaults to false
+func CumprodReverse(value bool) CumprodAttr {
return func(m optionalAttr) {
- m["dtype"] = value
+ m["reverse"] = value
}
}
-// Decode a PNG-encoded image to a uint8 or uint16 tensor.
+// Compute the cumulative product of the tensor `x` along `axis`.
//
-// The attr `channels` indicates the desired number of color channels for the
-// decoded image.
+// By default, this op performs an inclusive cumprod, which means that the first
+// element of the input is identical to the first element of the output:
//
-// Accepted values are:
+// ```python
+// tf.cumprod([a, b, c]) # => [a, a * b, a * b * c]
+// ```
//
-// * 0: Use the number of channels in the PNG-encoded image.
-// * 1: output a grayscale image.
-// * 3: output an RGB image.
-// * 4: output an RGBA image.
+// By setting the `exclusive` kwarg to `True`, an exclusive cumprod is
+// performed instead:
//
-// If needed, the PNG-encoded image is transformed to match the requested number
-// of color channels.
+// ```python
+// tf.cumprod([a, b, c], exclusive=True) # => [1, a, a * b]
+// ```
//
-// This op also supports decoding JPEGs and non-animated GIFs since the interface
-// is the same, though it is cleaner to use `tf.image.decode_image`.
+// By setting the `reverse` kwarg to `True`, the cumprod is performed in the
+// opposite direction:
//
-// Arguments:
-// contents: 0-D. The PNG-encoded image.
+// ```python
+// tf.cumprod([a, b, c], reverse=True) # => [a * b * c, b * c, c]
+// ```
//
-// Returns 3-D with shape `[height, width, channels]`.
-func DecodePng(scope *Scope, contents tf.Output, optional ...DecodePngAttr) (image tf.Output) {
+// This is more efficient than using separate `tf.reverse` ops.
+//
+// The `reverse` and `exclusive` kwargs can also be combined:
+//
+// ```python
+// tf.cumprod([a, b, c], exclusive=True, reverse=True) # => [b * c, c, 1]
+// ```
+//
+// Arguments:
+// x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
+// `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
+// `complex128`, `qint8`, `quint8`, `qint32`, `half`.
+// axis: A `Tensor` of type `int32` (default: 0). Must be in the range
+// `[-rank(x), rank(x))`.
+func Cumprod(scope *Scope, x tf.Output, axis tf.Output, optional ...CumprodAttr) (out tf.Output) {
if scope.Err() != nil {
return
}
@@ -6902,9 +6871,9 @@ func DecodePng(scope *Scope, contents tf.Output, optional ...DecodePngAttr) (ima
a(attrs)
}
opspec := tf.OpSpec{
- Type: "DecodePng",
+ Type: "Cumprod",
Input: []tf.Input{
- contents,
+ x, axis,
},
Attrs: attrs,
}
@@ -6912,153 +6881,125 @@ func DecodePng(scope *Scope, contents tf.Output, optional ...DecodePngAttr) (ima
return op.Output(0)
}
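+
+// Illustrative usage sketch (editorial): combining the `exclusive` and `reverse`
+// attributes from Go, mirroring the last Python example above.
+//
+// ```
+// s := op.NewScope()
+// x := op.Const(s, []float32{2, 3, 4})
+// axis := op.Const(s, int32(0))
+// out := op.Cumprod(s, x, axis, op.CumprodExclusive(true), op.CumprodReverse(true))
+// // Running the graph yields [12, 4, 1], i.e. [b*c, c, 1] for [a, b, c].
+// ```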
-// Adjust the contrast of one or more images.
+// Computes the mean along segments of a tensor.
//
-// `images` is a tensor of at least 3 dimensions. The last 3 dimensions are
-// interpreted as `[height, width, channels]`. The other dimensions only
-// represent a collection of images, such as `[batch, height, width, channels].`
+// Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
+// segments.
//
-// Contrast is adjusted independently for each channel of each image.
+// Computes a tensor such that
+// \\(output_i = \frac{\sum_j data_j}{N}\\) where the mean is
+// over `j` such that `segment_ids[j] == i` and `N` is the total number of
+// values summed.
//
-// For each channel, the Op first computes the mean of the image pixels in the
-// channel and then adjusts each component of each pixel to
-// `(x - mean) * contrast_factor + mean`.
+// If the segment is empty for a given segment ID `i`, `output[i] = 0`.
+//
+// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentMean.png" alt>
+// </div>
//
// Arguments:
-// images: Images to adjust. At least 3-D.
-// contrast_factor: A float multiplier for adjusting contrast.
//
-// Returns The contrast-adjusted image or images.
-func AdjustContrastv2(scope *Scope, images tf.Output, contrast_factor tf.Output) (output tf.Output) {
+// segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s
+// first dimension. Values should be sorted and can be repeated.
+//
+// Returns Has same shape as data, except for dimension 0 which
+// has size `k`, the number of segments.
+func SegmentMean(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "AdjustContrastv2",
+ Type: "SegmentMean",
Input: []tf.Input{
- images, contrast_factor,
+ data, segment_ids,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
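+
+// Illustrative usage sketch (editorial): averaging two segments of a 1-D tensor;
+// the input values and segment ids are made-up examples.
+//
+// ```
+// s := op.NewScope()
+// data := op.Const(s, []float32{1, 2, 3, 4})
+// ids := op.Const(s, []int32{0, 0, 1, 1})
+// mean := op.SegmentMean(s, data, ids)
+// // Running the graph yields [1.5, 3.5].
+// ```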
-// PaddingFIFOQueueV2Attr is an optional argument to PaddingFIFOQueueV2.
-type PaddingFIFOQueueV2Attr func(optionalAttr)
+// ResourceSparseApplyCenteredRMSPropAttr is an optional argument to ResourceSparseApplyCenteredRMSProp.
+type ResourceSparseApplyCenteredRMSPropAttr func(optionalAttr)
-// PaddingFIFOQueueV2Shapes sets the optional shapes attribute to value.
-//
-// value: The shape of each component in a value. The length of this attr must
-// be either 0 or the same as the length of component_types.
-// Shapes of fixed rank but variable size are allowed by setting
-// any shape dimension to -1. In this case, the inputs' shape may vary along
-// the given dimension, and DequeueMany will pad the given dimension with
-// zeros up to the maximum shape of all elements in the given batch.
-// If the length of this attr is 0, different queue elements may have
-// different ranks and shapes, but only one element may be dequeued at a time.
-// If not specified, defaults to <>
+// ResourceSparseApplyCenteredRMSPropUseLocking sets the optional use_locking attribute to value.
//
-// REQUIRES: len(value) >= 0
-func PaddingFIFOQueueV2Shapes(value []tf.Shape) PaddingFIFOQueueV2Attr {
+// value: If `True`, updating of the var, mg, ms, and mom tensors is
+// protected by a lock; otherwise the behavior is undefined, but may exhibit less
+// contention.
+// If not specified, defaults to false
+func ResourceSparseApplyCenteredRMSPropUseLocking(value bool) ResourceSparseApplyCenteredRMSPropAttr {
return func(m optionalAttr) {
- m["shapes"] = value
+ m["use_locking"] = value
}
}
-// PaddingFIFOQueueV2Capacity sets the optional capacity attribute to value.
+// Update '*var' according to the centered RMSProp algorithm.
//
-// value: The upper bound on the number of elements in this queue.
-// Negative numbers mean no limit.
-// If not specified, defaults to -1
-func PaddingFIFOQueueV2Capacity(value int64) PaddingFIFOQueueV2Attr {
- return func(m optionalAttr) {
- m["capacity"] = value
- }
-}
-
-// PaddingFIFOQueueV2Container sets the optional container attribute to value.
+// The centered RMSProp algorithm uses an estimate of the centered second moment
+// (i.e., the variance) for normalization, as opposed to regular RMSProp, which
+// uses the (uncentered) second moment. This often helps with training, but is
+// slightly more expensive in terms of computation and memory.
//
-// value: If non-empty, this queue is placed in the given container.
-// Otherwise, a default container is used.
-// If not specified, defaults to ""
-func PaddingFIFOQueueV2Container(value string) PaddingFIFOQueueV2Attr {
- return func(m optionalAttr) {
- m["container"] = value
- }
-}
-
-// PaddingFIFOQueueV2SharedName sets the optional shared_name attribute to value.
+// Note that in the dense implementation of this algorithm, mg, ms, and mom will
+// update even if the grad is zero, but in this sparse implementation, mg, ms,
+// and mom will not update in iterations during which the grad is zero.
//
-// value: If non-empty, this queue will be shared under the given name
-// across multiple sessions.
-// If not specified, defaults to ""
-func PaddingFIFOQueueV2SharedName(value string) PaddingFIFOQueueV2Attr {
- return func(m optionalAttr) {
- m["shared_name"] = value
- }
-}
-
-// A queue that produces elements in first-in first-out order.
+// mean_square = decay * mean_square + (1-decay) * gradient ** 2
+// mean_grad = decay * mean_grad + (1-decay) * gradient
+// Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
//
-// Variable-size shapes are allowed by setting the corresponding shape dimensions
-// to 0 in the shape attr. In this case DequeueMany will pad up to the maximum
-// size of any given element in the minibatch. See below for details.
+// mg <- rho * mg_{t-1} + (1-rho) * grad
+// ms <- rho * ms_{t-1} + (1-rho) * grad * grad
+// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)
+// var <- var - mom
//
// Arguments:
-// component_types: The type of each component in a value.
+// var_: Should be from a Variable().
+// mg: Should be from a Variable().
+// ms: Should be from a Variable().
+// mom: Should be from a Variable().
+// lr: Scaling factor. Must be a scalar.
+// rho: Decay rate. Must be a scalar.
//
-// Returns The handle to the queue.
-func PaddingFIFOQueueV2(scope *Scope, component_types []tf.DataType, optional ...PaddingFIFOQueueV2Attr) (handle tf.Output) {
+// epsilon: Ridge term. Must be a scalar.
+// grad: The gradient.
+// indices: A vector of indices into the first dimension of var, ms and mom.
+//
+// Returns the created operation.
+func ResourceSparseApplyCenteredRMSProp(scope *Scope, var_ tf.Output, mg tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyCenteredRMSPropAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"component_types": component_types}
+ attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "PaddingFIFOQueueV2",
-
+ Type: "ResourceSparseApplyCenteredRMSProp",
+ Input: []tf.Input{
+ var_, mg, ms, mom, lr, rho, momentum, epsilon, grad, indices,
+ },
Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
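+
+// Worked sketch (editorial): one centered-RMSProp step on plain Go scalars,
+// mirroring the update equations above; the hyperparameters are assumptions and
+// math.Sqrt comes from the standard library.
+//
+// ```
+// rho, lr, eps := 0.9, 0.1, 1e-8 // momentum assumed 0 for brevity
+// grad := 2.0
+// mg := (1 - rho) * grad        // 0.2: running mean of gradients, with mg_0 = 0
+// ms := (1 - rho) * grad * grad // 0.4: running mean of squared gradients
+// mom := lr * grad / math.Sqrt(ms-mg*mg+eps) // ~0.333; var decreases by mom
+// ```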
-// ExtractJpegShapeAttr is an optional argument to ExtractJpegShape.
-type ExtractJpegShapeAttr func(optionalAttr)
-
-// ExtractJpegShapeOutputType sets the optional output_type attribute to value.
+// Creates a dataset that batches `batch_size` elements from `input_dataset`.
//
-// value: (Optional) The output type of the operation (int32 or int64).
-// Defaults to int32.
-// If not specified, defaults to DT_INT32
-func ExtractJpegShapeOutputType(value tf.DataType) ExtractJpegShapeAttr {
- return func(m optionalAttr) {
- m["output_type"] = value
- }
-}
-
-// Extract the shape information of a JPEG-encoded image.
+// Arguments:
//
-// This op only parses the image header, so it is much faster than DecodeJpeg.
+// batch_size: A scalar representing the number of elements to accumulate in a
+// batch.
//
-// Arguments:
-// contents: 0-D. The JPEG-encoded image.
//
-// Returns 1-D. The image shape with format [height, width, channels].
-func ExtractJpegShape(scope *Scope, contents tf.Output, optional ...ExtractJpegShapeAttr) (image_shape tf.Output) {
+func BatchDataset(scope *Scope, input_dataset tf.Output, batch_size tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
+ attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
opspec := tf.OpSpec{
- Type: "ExtractJpegShape",
+ Type: "BatchDataset",
Input: []tf.Input{
- contents,
+ input_dataset, batch_size,
},
Attrs: attrs,
}
@@ -7066,103 +7007,94 @@ func ExtractJpegShape(scope *Scope, contents tf.Output, optional ...ExtractJpegS
return op.Output(0)
}
-// DecodeJpegAttr is an optional argument to DecodeJpeg.
-type DecodeJpegAttr func(optionalAttr)
-
-// DecodeJpegChannels sets the optional channels attribute to value.
+// Inverse fast Fourier transform.
//
-// value: Number of color channels for the decoded image.
-// If not specified, defaults to 0
-func DecodeJpegChannels(value int64) DecodeJpegAttr {
- return func(m optionalAttr) {
- m["channels"] = value
- }
-}
-
-// DecodeJpegRatio sets the optional ratio attribute to value.
+// Computes the inverse 1-dimensional discrete Fourier transform over the
+// inner-most dimension of `input`.
//
-// value: Downscaling ratio.
-// If not specified, defaults to 1
-func DecodeJpegRatio(value int64) DecodeJpegAttr {
- return func(m optionalAttr) {
- m["ratio"] = value
+// Arguments:
+// input: A complex64 tensor.
+//
+// Returns A complex64 tensor of the same shape as `input`. The inner-most
+// dimension of `input` is replaced with its inverse 1D Fourier transform.
+//
+// @compatibility(numpy)
+// Equivalent to np.fft.ifft
+// @end_compatibility
+func IFFT(scope *Scope, input tf.Output) (output tf.Output) {
+ if scope.Err() != nil {
+ return
}
+ opspec := tf.OpSpec{
+ Type: "IFFT",
+ Input: []tf.Input{
+ input,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
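+
+// Illustrative usage sketch (editorial): the inverse DFT of a unit impulse is a
+// constant sequence; the input values here are assumptions.
+//
+// ```
+// s := op.NewScope()
+// x := op.Const(s, []complex64{1, 0, 0, 0})
+// y := op.IFFT(s, x)
+// // Running the graph yields [0.25, 0.25, 0.25, 0.25] (the 1/N-normalized inverse).
+// ```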
-// DecodeJpegFancyUpscaling sets the optional fancy_upscaling attribute to value.
+// LRNAttr is an optional argument to LRN.
+type LRNAttr func(optionalAttr)
+
+// LRNDepthRadius sets the optional depth_radius attribute to value.
//
-// value: If true use a slower but nicer upscaling of the
-// chroma planes (yuv420/422 only).
-// If not specified, defaults to true
-func DecodeJpegFancyUpscaling(value bool) DecodeJpegAttr {
+// value: 0-D. Half-width of the 1-D normalization window.
+// If not specified, defaults to 5
+func LRNDepthRadius(value int64) LRNAttr {
return func(m optionalAttr) {
- m["fancy_upscaling"] = value
+ m["depth_radius"] = value
}
}
-// DecodeJpegTryRecoverTruncated sets the optional try_recover_truncated attribute to value.
+// LRNBias sets the optional bias attribute to value.
//
-// value: If true try to recover an image from truncated input.
-// If not specified, defaults to false
-func DecodeJpegTryRecoverTruncated(value bool) DecodeJpegAttr {
+// value: An offset (usually positive to avoid dividing by 0).
+// If not specified, defaults to 1
+func LRNBias(value float32) LRNAttr {
return func(m optionalAttr) {
- m["try_recover_truncated"] = value
+ m["bias"] = value
}
}
-// DecodeJpegAcceptableFraction sets the optional acceptable_fraction attribute to value.
+// LRNAlpha sets the optional alpha attribute to value.
//
-// value: The minimum required fraction of lines before a truncated
-// input is accepted.
+// value: A scale factor, usually positive.
// If not specified, defaults to 1
-func DecodeJpegAcceptableFraction(value float32) DecodeJpegAttr {
+func LRNAlpha(value float32) LRNAttr {
return func(m optionalAttr) {
- m["acceptable_fraction"] = value
+ m["alpha"] = value
}
}
-// DecodeJpegDctMethod sets the optional dct_method attribute to value.
+// LRNBeta sets the optional beta attribute to value.
//
-// value: string specifying a hint about the algorithm used for
-// decompression. Defaults to "" which maps to a system-specific
-// default. Currently valid values are ["INTEGER_FAST",
-// "INTEGER_ACCURATE"]. The hint may be ignored (e.g., the internal
-// jpeg library changes to a version that does not have that specific
-// option.)
-// If not specified, defaults to ""
-func DecodeJpegDctMethod(value string) DecodeJpegAttr {
+// value: An exponent.
+// If not specified, defaults to 0.5
+func LRNBeta(value float32) LRNAttr {
return func(m optionalAttr) {
- m["dct_method"] = value
+ m["beta"] = value
}
}
-// Decode a JPEG-encoded image to a uint8 tensor.
-//
-// The attr `channels` indicates the desired number of color channels for the
-// decoded image.
-//
-// Accepted values are:
-//
-// * 0: Use the number of channels in the JPEG-encoded image.
-// * 1: output a grayscale image.
-// * 3: output an RGB image.
-//
-// If needed, the JPEG-encoded image is transformed to match the requested number
-// of color channels.
+// Local Response Normalization.
//
-// The attr `ratio` allows downscaling the image by an integer factor during
-// decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than
-// downscaling the image later.
+// The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last
+// dimension), and each vector is normalized independently. Within a given vector,
+// each component is divided by the weighted, squared sum of inputs within
+// `depth_radius`. In detail,
//
+// sqr_sum[a, b, c, d] =
+// sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
+// output = input / (bias + alpha * sqr_sum) ** beta
//
-// This op also supports decoding PNGs and non-animated GIFs since the interface is
-// the same, though it is cleaner to use `tf.image.decode_image`.
+// For details, see [Krizhevsky et al., ImageNet classification with deep
+// convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks).
//
// Arguments:
-// contents: 0-D. The JPEG-encoded image.
-//
-// Returns 3-D with shape `[height, width, channels]`..
-func DecodeJpeg(scope *Scope, contents tf.Output, optional ...DecodeJpegAttr) (image tf.Output) {
+// input: 4-D.
+func LRN(scope *Scope, input tf.Output, optional ...LRNAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
@@ -7171,9 +7103,9 @@ func DecodeJpeg(scope *Scope, contents tf.Output, optional ...DecodeJpegAttr) (i
a(attrs)
}
opspec := tf.OpSpec{
- Type: "DecodeJpeg",
+ Type: "LRN",
Input: []tf.Input{
- contents,
+ input,
},
Attrs: attrs,
}
@@ -7181,31 +7113,74 @@ func DecodeJpeg(scope *Scope, contents tf.Output, optional ...DecodeJpegAttr) (i
return op.Output(0)
}
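+
+// Illustrative usage sketch (editorial): normalizing a single pixel with three
+// channels; the attribute values are assumptions, not recommended settings.
+//
+// ```
+// s := op.NewScope()
+// in := op.Const(s, [][][][]float32{{{{1, 2, 3}}}}) // shape [1, 1, 1, 3]
+// out := op.LRN(s, in, op.LRNDepthRadius(1), op.LRNBias(1), op.LRNAlpha(1), op.LRNBeta(0.5))
+// ```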
-// ResizeNearestNeighborGradAttr is an optional argument to ResizeNearestNeighborGrad.
-type ResizeNearestNeighborGradAttr func(optionalAttr)
+// Creates a dataset that zips together `input_datasets`.
+func ZipDataset(scope *Scope, input_datasets []tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
+ opspec := tf.OpSpec{
+ Type: "ZipDataset",
+ Input: []tf.Input{
+ tf.OutputList(input_datasets),
+ },
+ Attrs: attrs,
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
-// ResizeNearestNeighborGradAlignCorners sets the optional align_corners attribute to value.
+// Writes a `GraphDef` protocol buffer to a `SummaryWriter`.
//
-// value: If true, rescale grads by (orig_height - 1) / (height - 1), which
-// exactly aligns the 4 corners of grads and original_image. If false, rescale by
-// orig_height / height. Treat similarly the width dimension.
+// Arguments:
+// writer: Handle of `SummaryWriter`.
+// step: The step to write the summary for.
+// tensor: A scalar string of the serialized tf.GraphDef proto.
+//
+// Returns the created operation.
+func WriteGraphSummary(scope *Scope, writer tf.Output, step tf.Output, tensor tf.Output) (o *tf.Operation) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "WriteGraphSummary",
+ Input: []tf.Input{
+ writer, step, tensor,
+ },
+ }
+ return scope.AddOperation(opspec)
+}
+
+// ResourceSparseApplyAdagradAttr is an optional argument to ResourceSparseApplyAdagrad.
+type ResourceSparseApplyAdagradAttr func(optionalAttr)
+
+// ResourceSparseApplyAdagradUseLocking sets the optional use_locking attribute to value.
+//
+// value: If `True`, updating of the var and accum tensors will be protected
+// by a lock; otherwise the behavior is undefined, but may exhibit less
+// contention.
// If not specified, defaults to false
-func ResizeNearestNeighborGradAlignCorners(value bool) ResizeNearestNeighborGradAttr {
+func ResourceSparseApplyAdagradUseLocking(value bool) ResourceSparseApplyAdagradAttr {
return func(m optionalAttr) {
- m["align_corners"] = value
+ m["use_locking"] = value
}
}
-// Computes the gradient of nearest neighbor interpolation.
+// Update relevant entries in '*var' and '*accum' according to the adagrad scheme.
+//
+// That is for rows we have grad for, we update var and accum as follows:
+// accum += grad * grad
+// var -= lr * grad * (1 / sqrt(accum))
//
// Arguments:
-// grads: 4-D with shape `[batch, height, width, channels]`.
-// size: = A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The
-// original input size.
+// var_: Should be from a Variable().
+// accum: Should be from a Variable().
+// lr: Learning rate. Must be a scalar.
+// grad: The gradient.
+// indices: A vector of indices into the first dimension of var and accum.
//
-// Returns 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients
-// with respect to the input image.
-func ResizeNearestNeighborGrad(scope *Scope, grads tf.Output, size tf.Output, optional ...ResizeNearestNeighborGradAttr) (output tf.Output) {
+// Returns the created operation.
+func ResourceSparseApplyAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyAdagradAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
@@ -7214,32 +7189,78 @@ func ResizeNearestNeighborGrad(scope *Scope, grads tf.Output, size tf.Output, op
a(attrs)
}
opspec := tf.OpSpec{
- Type: "ResizeNearestNeighborGrad",
+ Type: "ResourceSparseApplyAdagrad",
Input: []tf.Input{
- grads, size,
+ var_, accum, lr, grad, indices,
},
Attrs: attrs,
}
+ return scope.AddOperation(opspec)
+}
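+
+// Worked sketch (editorial): one Adagrad step on plain Go scalars for a single
+// updated row, following the two equations above; the values are assumptions.
+//
+// ```
+// lr, grad := 0.1, 3.0
+// accum := 1.0
+// accum += grad * grad                 // 10
+// step := lr * grad / math.Sqrt(accum) // ~0.0949; that row of var decreases by step
+// ```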
+
+// 2D real-valued fast Fourier transform.
+//
+// Computes the 2-dimensional discrete Fourier transform of a real-valued signal
+// over the inner-most 2 dimensions of `input`.
+//
+// Since the DFT of a real signal is Hermitian-symmetric, `RFFT2D` only returns the
+// `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension
+// of `output`: the zero-frequency term, followed by the `fft_length / 2`
+// positive-frequency terms.
+//
+// Along each axis `RFFT2D` is computed on, if `fft_length` is smaller than the
+// corresponding dimension of `input`, the dimension is cropped. If it is larger,
+// the dimension is padded with zeros.
+//
+// Arguments:
+// input: A float32 tensor.
+// fft_length: An int32 tensor of shape [2]. The FFT length for each dimension.
+//
+// Returns A complex64 tensor of the same rank as `input`. The inner-most 2
+// dimensions of `input` are replaced with their 2D Fourier transform. The
+// inner-most dimension contains `fft_length / 2 + 1` unique frequency
+// components.
+//
+// @compatibility(numpy)
+// Equivalent to np.fft.rfft2
+// @end_compatibility
+func RFFT2D(scope *Scope, input tf.Output, fft_length tf.Output) (output tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "RFFT2D",
+ Input: []tf.Input{
+ input, fft_length,
+ },
+ }
op := scope.AddOperation(opspec)
return op.Output(0)
}
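+
+// Illustrative usage sketch (editorial): a [2, 4] real signal yields a [2, 3]
+// complex spectrum, since fft_length/2 + 1 = 3; the inputs are made up.
+//
+// ```
+// s := op.NewScope()
+// signal := op.Const(s, [][]float32{{1, 0, 0, 0}, {0, 0, 0, 0}})
+// spec := op.RFFT2D(s, signal, op.Const(s, []int32{2, 4}))
+// ```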
-// ResizeNearestNeighborAttr is an optional argument to ResizeNearestNeighbor.
-type ResizeNearestNeighborAttr func(optionalAttr)
+// ResizeAreaAttr is an optional argument to ResizeArea.
+type ResizeAreaAttr func(optionalAttr)
-// ResizeNearestNeighborAlignCorners sets the optional align_corners attribute to value.
+// ResizeAreaAlignCorners sets the optional align_corners attribute to value.
//
// value: If true, rescale input by (new_height - 1) / (height - 1), which
// exactly aligns the 4 corners of images and resized images. If false, rescale
// by new_height / height. Treat similarly the width dimension.
// If not specified, defaults to false
-func ResizeNearestNeighborAlignCorners(value bool) ResizeNearestNeighborAttr {
+func ResizeAreaAlignCorners(value bool) ResizeAreaAttr {
return func(m optionalAttr) {
m["align_corners"] = value
}
}
-// Resize `images` to `size` using nearest neighbor interpolation.
+// Resize `images` to `size` using area interpolation.
+//
+// Input images can be of different types but output images are always float.
+//
+// Each output pixel is computed by first transforming the pixel's footprint into
+// the input tensor and then averaging the pixels that intersect the footprint. An
+// input pixel's contribution to the average is weighted by the fraction of its
+// area that intersects the footprint. This is the same as OpenCV's INTER_AREA.
//
// Arguments:
// images: 4-D with shape `[batch, height, width, channels]`.
@@ -7248,7 +7269,7 @@ func ResizeNearestNeighborAlignCorners(value bool) ResizeNearestNeighborAttr {
//
// Returns 4-D with shape
// `[batch, new_height, new_width, channels]`.
-func ResizeNearestNeighbor(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeNearestNeighborAttr) (resized_images tf.Output) {
+func ResizeArea(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeAreaAttr) (resized_images tf.Output) {
if scope.Err() != nil {
return
}
@@ -7257,7 +7278,7 @@ func ResizeNearestNeighbor(scope *Scope, images tf.Output, size tf.Output, optio
a(attrs)
}
opspec := tf.OpSpec{
- Type: "ResizeNearestNeighbor",
+ Type: "ResizeArea",
Input: []tf.Input{
images, size,
},
@@ -7267,32 +7288,32 @@ func ResizeNearestNeighbor(scope *Scope, images tf.Output, size tf.Output, optio
return op.Output(0)
}
-// ResizeBicubicGradAttr is an optional argument to ResizeBicubicGrad.
-type ResizeBicubicGradAttr func(optionalAttr)
+// StatelessRandomUniformAttr is an optional argument to StatelessRandomUniform.
+type StatelessRandomUniformAttr func(optionalAttr)
-// ResizeBicubicGradAlignCorners sets the optional align_corners attribute to value.
+// StatelessRandomUniformDtype sets the optional dtype attribute to value.
//
-// value: If true, rescale grads by (orig_height - 1) / (height - 1), which
-// exactly aligns the 4 corners of grads and original_image. If false, rescale by
-// orig_height / height. Treat similarly the width dimension.
-// If not specified, defaults to false
-func ResizeBicubicGradAlignCorners(value bool) ResizeBicubicGradAttr {
+// value: The type of the output.
+// If not specified, defaults to DT_FLOAT
+func StatelessRandomUniformDtype(value tf.DataType) StatelessRandomUniformAttr {
return func(m optionalAttr) {
- m["align_corners"] = value
+ m["dtype"] = value
}
}
-// Computes the gradient of bicubic interpolation.
+// Outputs deterministic pseudorandom random values from a uniform distribution.
+//
+// The generated values follow a uniform distribution in the range `[0, 1)`. The
+// lower bound 0 is included in the range, while the upper bound 1 is excluded.
+//
+// The outputs are a deterministic function of `shape` and `seed`.
//
// Arguments:
-// grads: 4-D with shape `[batch, height, width, channels]`.
-// original_image: 4-D with shape `[batch, orig_height, orig_width, channels]`,
-// The image tensor that was resized.
+// shape: The shape of the output tensor.
+// seed: 2 seeds (shape [2]).
//
-// Returns 4-D with shape `[batch, orig_height, orig_width, channels]`.
-// Gradients with respect to the input image. Input image must have been
-// float or double.
-func ResizeBicubicGrad(scope *Scope, grads tf.Output, original_image tf.Output, optional ...ResizeBicubicGradAttr) (output tf.Output) {
+// Returns Random values with specified shape.
+func StatelessRandomUniform(scope *Scope, shape tf.Output, seed tf.Output, optional ...StatelessRandomUniformAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
@@ -7301,9 +7322,9 @@ func ResizeBicubicGrad(scope *Scope, grads tf.Output, original_image tf.Output,
a(attrs)
}
opspec := tf.OpSpec{
- Type: "ResizeBicubicGrad",
+ Type: "StatelessRandomUniform",
Input: []tf.Input{
- grads, original_image,
+ shape, seed,
},
Attrs: attrs,
}
@@ -7311,32 +7332,37 @@ func ResizeBicubicGrad(scope *Scope, grads tf.Output, original_image tf.Output,
return op.Output(0)
}
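+
+// Illustrative usage sketch (editorial): identical shape and seed inputs always
+// reproduce the same values; the seed pair here is arbitrary.
+//
+// ```
+// s := op.NewScope()
+// r := op.StatelessRandomUniform(s, op.Const(s, []int32{2, 3}), op.Const(s, []int64{1, 2}))
+// // r is a deterministic [2, 3] float tensor with values in [0, 1).
+// ```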
-// SummaryWriterAttr is an optional argument to SummaryWriter.
-type SummaryWriterAttr func(optionalAttr)
-
-// SummaryWriterSharedName sets the optional shared_name attribute to value.
-// If not specified, defaults to ""
-func SummaryWriterSharedName(value string) SummaryWriterAttr {
- return func(m optionalAttr) {
- m["shared_name"] = value
- }
-}
+// AngleAttr is an optional argument to Angle.
+type AngleAttr func(optionalAttr)
-// SummaryWriterContainer sets the optional container attribute to value.
-// If not specified, defaults to ""
-func SummaryWriterContainer(value string) SummaryWriterAttr {
+// AngleTout sets the optional Tout attribute to value.
+// If not specified, defaults to DT_FLOAT
+func AngleTout(value tf.DataType) AngleAttr {
return func(m optionalAttr) {
- m["container"] = value
+ m["Tout"] = value
}
}
-// Returns a handle to be used to access a summary writer.
+// Returns the argument of a complex number.
//
-// The summary writer is an in-graph resource which can be used by ops to write
-// summaries to event files.
+// Given a tensor `input` of complex numbers, this operation returns a tensor of
+// type `float` that is the argument of each element in `input`. All elements in
+// `input` must be complex numbers of the form \\(a + bj\\), where *a*
+// is the real part and *b* is the imaginary part.
//
-// Returns the summary writer resource. Scalar handle.
-func SummaryWriter(scope *Scope, optional ...SummaryWriterAttr) (writer tf.Output) {
+// The argument returned by this operation is of the form \\(atan2(b, a)\\).
+//
+// For example:
+//
+// ```
+// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
+// tf.angle(input) ==> [2.0132, 1.056]
+// ```
+//
+// @compatibility(numpy)
+// Equivalent to np.angle.
+// @end_compatibility
+func Angle(scope *Scope, input tf.Output, optional ...AngleAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
@@ -7345,189 +7371,248 @@ func SummaryWriter(scope *Scope, optional ...SummaryWriterAttr) (writer tf.Outpu
a(attrs)
}
opspec := tf.OpSpec{
- Type: "SummaryWriter",
-
+ Type: "Angle",
+ Input: []tf.Input{
+ input,
+ },
Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
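+
+// Illustrative usage sketch (editorial): recomputing the first value of the
+// example above from Go.
+//
+// ```
+// s := op.NewScope()
+// z := op.Const(s, []complex64{complex(-2.25, 4.75)})
+// theta := op.Angle(s, z)
+// // Running the graph yields approximately [2.0132], i.e. atan2(4.75, -2.25).
+// ```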
-// Returns the set of files matching one or more glob patterns.
-//
-// Note that this routine only supports wildcard characters in the
-// basename portion of the pattern, not in the directory portion.
-//
-// Arguments:
-// pattern: Shell wildcard pattern(s). Scalar or vector of type string.
+// VarHandleOpAttr is an optional argument to VarHandleOp.
+type VarHandleOpAttr func(optionalAttr)
+
+// VarHandleOpContainer sets the optional container attribute to value.
//
-// Returns A vector of matching filenames.
-func MatchingFiles(scope *Scope, pattern tf.Output) (filenames tf.Output) {
- if scope.Err() != nil {
- return
+// value: the container this variable is placed in.
+// If not specified, defaults to ""
+func VarHandleOpContainer(value string) VarHandleOpAttr {
+ return func(m optionalAttr) {
+ m["container"] = value
}
- opspec := tf.OpSpec{
- Type: "MatchingFiles",
- Input: []tf.Input{
- pattern,
- },
+}
+
+// VarHandleOpSharedName sets the optional shared_name attribute to value.
+//
+// value: the name by which this variable is referred to.
+// If not specified, defaults to ""
+func VarHandleOpSharedName(value string) VarHandleOpAttr {
+ return func(m optionalAttr) {
+ m["shared_name"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// Store the input tensor in the state of the current session.
+// Creates a handle to a Variable resource.
//
// Arguments:
-// value: The tensor to be stored.
-//
-// Returns The handle for the tensor stored in the session state, represented
-// as a ResourceHandle object.
-func GetSessionHandleV2(scope *Scope, value tf.Output) (handle tf.Output) {
+// dtype: the type of this variable. Must agree with the dtypes
+// of all ops using this variable.
+// shape: The (possibly partially specified) shape of this variable.
+func VarHandleOp(scope *Scope, dtype tf.DataType, shape tf.Shape, optional ...VarHandleOpAttr) (resource tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"dtype": dtype, "shape": shape}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "GetSessionHandleV2",
- Input: []tf.Input{
- value,
- },
+ Type: "VarHandleOp",
+
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
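+
+// Illustrative usage sketch (editorial): creating a handle for a shared 2x2
+// float variable; the shared name is an arbitrary example.
+//
+// ```
+// s := op.NewScope()
+// h := op.VarHandleOp(s, tf.Float, tf.MakeShape(2, 2), op.VarHandleOpSharedName("w"))
+// ```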
-// Adjust the hue of one or more images.
-//
-// `images` is a tensor of at least 3 dimensions. The last dimension is
-// interpretted as channels, and must be three.
-//
-// The input image is considered in the RGB colorspace. Conceptually, the RGB
-// colors are first mapped into HSV. A delta is then applied all the hue values,
-// and then remapped back to RGB colorspace.
+// Creates a summary file writer accessible by the given resource handle.
//
// Arguments:
-// images: Images to adjust. At least 3-D.
-// delta: A float delta to add to the hue.
+// writer: A handle to the summary writer resource
+// logdir: Directory where the event file will be written.
+// max_queue: Size of the queue of pending events and summaries.
+// flush_millis: How often, in milliseconds, to flush the pending events and
+// summaries to disk.
+// filename_suffix: Every event file's name is suffixed with this suffix.
//
-// Returns The hue-adjusted image or images.
-func AdjustHue(scope *Scope, images tf.Output, delta tf.Output) (output tf.Output) {
+// Returns the created operation.
+func CreateSummaryFileWriter(scope *Scope, writer tf.Output, logdir tf.Output, max_queue tf.Output, flush_millis tf.Output, filename_suffix tf.Output) (o *tf.Operation) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "AdjustHue",
+ Type: "CreateSummaryFileWriter",
Input: []tf.Input{
- images, delta,
+ writer, logdir, max_queue, flush_millis, filename_suffix,
},
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
-// Restore a Reader to its initial clean state.
-//
-// Arguments:
-// reader_handle: Handle to a Reader.
+// Elementwise computes the bitwise XOR of `x` and `y`.
//
-// Returns the created operation.
-func ReaderResetV2(scope *Scope, reader_handle tf.Output) (o *tf.Operation) {
+// The result will have those bits set, that are different in `x` and `y`. The
+// computation is performed on the underlying representations of `x` and `y`.
+func BitwiseXor(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "ReaderResetV2",
+ Type: "BitwiseXor",
Input: []tf.Input{
- reader_handle,
+ x, y,
},
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
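+
+// Illustrative usage sketch (editorial): XOR-ing two small int32 vectors.
+//
+// ```
+// s := op.NewScope()
+// z := op.BitwiseXor(s, op.Const(s, []int32{5, 12}), op.Const(s, []int32{3, 10}))
+// // Running the graph yields [6, 6]: 0101^0011 = 0110 and 1100^1010 = 0110.
+// ```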
-// Returns up to `num_records` (key, value) pairs produced by a Reader.
+// Deserialize `SparseTensor` objects.
//
-// Will dequeue from the input queue if necessary (e.g. when the
-// Reader needs to start reading from a new file since it has finished
-// with the previous file).
-// It may return less than `num_records` even before the last batch.
+// The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where
+// the last dimension stores serialized `SparseTensor` objects and the other N
+// dimensions (N >= 0) correspond to a batch. The ranks of the original
+// `SparseTensor` objects must all match. When the final `SparseTensor` is
+// created, its rank is the rank of the incoming `SparseTensor` objects plus N;
+// the sparse tensors have been concatenated along new dimensions, one for each
+// batch.
//
-// Arguments:
-// reader_handle: Handle to a `Reader`.
-// queue_handle: Handle to a `Queue`, with string work items.
-// num_records: number of records to read from `Reader`.
+// The output `SparseTensor` object's shape values for the original dimensions
+// are the max across the input `SparseTensor` objects' shape values for the
+// corresponding dimensions. The new dimensions match the size of the batch.
//
-// Returns A 1-D tensor.A 1-D tensor.
-func ReaderReadUpToV2(scope *Scope, reader_handle tf.Output, queue_handle tf.Output, num_records tf.Output) (keys tf.Output, values tf.Output) {
+// The input `SparseTensor` objects' indices are assumed ordered in
+// standard lexicographic order. If this is not the case, after this
+// step run `SparseReorder` to restore index ordering.
+//
+// For example, if the serialized input is a `[2 x 3]` matrix representing two
+// original `SparseTensor` objects:
+//
+// index = [ 0]
+// [10]
+// [20]
+// values = [1, 2, 3]
+// shape = [50]
+//
+// and
+//
+// index = [ 2]
+// [10]
+// values = [4, 5]
+// shape = [30]
+//
+// then the final deserialized `SparseTensor` will be:
+//
+// index = [0 0]
+// [0 10]
+// [0 20]
+// [1 2]
+// [1 10]
+// values = [1, 2, 3, 4, 5]
+// shape = [2 50]
+//
+// Arguments:
+// serialized_sparse: The serialized `SparseTensor` objects. The last dimension
+// must have 3 columns.
+// dtype: The `dtype` of the serialized `SparseTensor` objects.
+func DeserializeSparse(scope *Scope, serialized_sparse tf.Output, dtype tf.DataType) (sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"dtype": dtype}
opspec := tf.OpSpec{
- Type: "ReaderReadUpToV2",
+ Type: "DeserializeSparse",
Input: []tf.Input{
- reader_handle, queue_handle, num_records,
+ serialized_sparse,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1)
+ return op.Output(0), op.Output(1), op.Output(2)
}
-// Returns the next record (key, value pair) produced by a Reader.
+// ResourceApplyRMSPropAttr is an optional argument to ResourceApplyRMSProp.
+type ResourceApplyRMSPropAttr func(optionalAttr)
+
+// ResourceApplyRMSPropUseLocking sets the optional use_locking attribute to value.
//
-// Will dequeue from the input queue if necessary (e.g. when the
-// Reader needs to start reading from a new file since it has finished
-// with the previous file).
+// value: If `True`, updating of the var, ms, and mom tensors is protected
+// by a lock; otherwise the behavior is undefined, but may exhibit less
+// contention.
+// If not specified, defaults to false
+func ResourceApplyRMSPropUseLocking(value bool) ResourceApplyRMSPropAttr {
+ return func(m optionalAttr) {
+ m["use_locking"] = value
+ }
+}
+
+// Update '*var' according to the RMSProp algorithm.
+//
+// Note that in the dense implementation of this algorithm (which this op uses),
+// ms and mom will update even if the grad is zero; in the sparse
+// implementation, ms and mom will not update in iterations during which the
+// grad is zero.
+//
+// mean_square = decay * mean_square + (1-decay) * gradient ** 2
+// Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
+//
+// ms <- rho * ms_{t-1} + (1-rho) * grad * grad
+// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
+// var <- var - mom
//
// Arguments:
-// reader_handle: Handle to a Reader.
-// queue_handle: Handle to a Queue, with string work items.
+// var_: Should be from a Variable().
+// ms: Should be from a Variable().
+// mom: Should be from a Variable().
+// lr: Scaling factor. Must be a scalar.
+// rho: Decay rate. Must be a scalar.
//
-// Returns A scalar.A scalar.
-func ReaderReadV2(scope *Scope, reader_handle tf.Output, queue_handle tf.Output) (key tf.Output, value tf.Output) {
+// epsilon: Ridge term. Must be a scalar.
+// grad: The gradient.
+//
+// Returns the created operation.
+func ResourceApplyRMSProp(scope *Scope, var_ tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyRMSPropAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "ReaderReadV2",
+ Type: "ResourceApplyRMSProp",
Input: []tf.Input{
- reader_handle, queue_handle,
+ var_, ms, mom, lr, rho, momentum, epsilon, grad,
},
+ Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1)
+ return scope.AddOperation(opspec)
}
-// IdentityReaderV2Attr is an optional argument to IdentityReaderV2.
-type IdentityReaderV2Attr func(optionalAttr)
+// SizeAttr is an optional argument to Size.
+type SizeAttr func(optionalAttr)
-// IdentityReaderV2Container sets the optional container attribute to value.
-//
-// value: If non-empty, this reader is placed in the given container.
-// Otherwise, a default container is used.
-// If not specified, defaults to ""
-func IdentityReaderV2Container(value string) IdentityReaderV2Attr {
+// SizeOutType sets the optional out_type attribute to value.
+// If not specified, defaults to DT_INT32
+func SizeOutType(value tf.DataType) SizeAttr {
return func(m optionalAttr) {
- m["container"] = value
+ m["out_type"] = value
}
}
-// IdentityReaderV2SharedName sets the optional shared_name attribute to value.
+// Returns the size of a tensor.
//
-// value: If non-empty, this reader is named in the given bucket
-// with this shared_name. Otherwise, the node name is used instead.
-// If not specified, defaults to ""
-func IdentityReaderV2SharedName(value string) IdentityReaderV2Attr {
- return func(m optionalAttr) {
- m["shared_name"] = value
- }
-}
-
-// A Reader that outputs the queued work as both the key and value.
+// This operation returns an integer representing the number of elements in
+// `input`.
//
-// To use, enqueue strings in a Queue. ReaderRead will take the front
-// work string and output (work, work).
+// For example:
//
-// Returns The handle to reference the Reader.
-func IdentityReaderV2(scope *Scope, optional ...IdentityReaderV2Attr) (reader_handle tf.Output) {
+// ```
+// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
+// size(t) ==> 12
+// ```
+func Size(scope *Scope, input tf.Output, optional ...SizeAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
@@ -7536,51 +7621,78 @@ func IdentityReaderV2(scope *Scope, optional ...IdentityReaderV2Attr) (reader_ha
a(attrs)
}
opspec := tf.OpSpec{
- Type: "IdentityReaderV2",
-
+ Type: "Size",
+ Input: []tf.Input{
+ input,
+ },
Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
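+
+// Illustrative usage sketch (editorial): counting the elements of a 2x3 matrix.
+//
+// ```
+// s := op.NewScope()
+// n := op.Size(s, op.Const(s, [][]int32{{1, 2, 3}, {4, 5, 6}}))
+// // Running the graph yields 6.
+// ```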
-// TFRecordReaderV2Attr is an optional argument to TFRecordReaderV2.
-type TFRecordReaderV2Attr func(optionalAttr)
+// ResourceScatterNdUpdateAttr is an optional argument to ResourceScatterNdUpdate.
+type ResourceScatterNdUpdateAttr func(optionalAttr)
-// TFRecordReaderV2Container sets the optional container attribute to value.
+// ResourceScatterNdUpdateUseLocking sets the optional use_locking attribute to value.
//
-// value: If non-empty, this reader is placed in the given container.
-// Otherwise, a default container is used.
-// If not specified, defaults to ""
-func TFRecordReaderV2Container(value string) TFRecordReaderV2Attr {
+// value: An optional bool. Defaults to True. If True, the assignment will
+// be protected by a lock; otherwise the behavior is undefined,
+// but may exhibit less contention.
+// If not specified, defaults to true
+func ResourceScatterNdUpdateUseLocking(value bool) ResourceScatterNdUpdateAttr {
return func(m optionalAttr) {
- m["container"] = value
+ m["use_locking"] = value
}
}
-// TFRecordReaderV2SharedName sets the optional shared_name attribute to value.
+// Applies sparse `updates` to individual values or slices within a given
//
-// value: If non-empty, this reader is named in the given bucket
-// with this shared_name. Otherwise, the node name is used instead.
-// If not specified, defaults to ""
-func TFRecordReaderV2SharedName(value string) TFRecordReaderV2Attr {
- return func(m optionalAttr) {
- m["shared_name"] = value
- }
-}
-
-// TFRecordReaderV2CompressionType sets the optional compression_type attribute to value.
-// If not specified, defaults to ""
-func TFRecordReaderV2CompressionType(value string) TFRecordReaderV2Attr {
- return func(m optionalAttr) {
- m["compression_type"] = value
- }
-}
-
-// A Reader that outputs the records from a TensorFlow Records file.
+// variable according to `indices`.
//
-// Returns The handle to reference the Reader.
-func TFRecordReaderV2(scope *Scope, optional ...TFRecordReaderV2Attr) (reader_handle tf.Output) {
+// `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
+//
+// `indices` must be integer tensor, containing indices into `ref`.
+// It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
+//
+// The innermost dimension of `indices` (with length `K`) corresponds to
+// indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
+// dimension of `ref`.
+//
+// `updates` is a `Tensor` of rank `Q-1+P-K` with shape:
+//
+// ```
+// [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
+// ```
+//
+// For example, say we want to update 4 scattered elements of a rank-1 tensor
+// with 8 elements. In Python, that update would look like this:
+//
+// ```python
+// ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
+// indices = tf.constant([[4], [3], [1], [7]])
+// updates = tf.constant([9, 10, 11, 12])
+// update = tf.scatter_nd_update(ref, indices, updates)
+// with tf.Session() as sess:
+//   sess.run(tf.global_variables_initializer())
+//   print(sess.run(update))
+// ```
+//
+// The resulting update to ref would look like this:
+//
+// [1, 11, 3, 10, 9, 6, 7, 12]
+//
+// See @{tf.scatter_nd} for more details about how to make updates to
+// slices.
+//
+// Arguments:
+// ref: A resource handle. Must be from a VarHandleOp.
+// indices: A Tensor. Must be one of the following types: int32, int64.
+// A tensor of indices into ref.
+// updates: A Tensor. Must have the same type as ref. A tensor of updated
+// values to add to ref.
+//
+// Returns the created operation.
+func ResourceScatterNdUpdate(scope *Scope, ref tf.Output, indices tf.Output, updates tf.Output, optional ...ResourceScatterNdUpdateAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
@@ -7589,62 +7701,65 @@ func TFRecordReaderV2(scope *Scope, optional ...TFRecordReaderV2Attr) (reader_ha
a(attrs)
}
opspec := tf.OpSpec{
- Type: "TFRecordReaderV2",
-
+ Type: "ResourceScatterNdUpdate",
+ Input: []tf.Input{
+ ref, indices, updates,
+ },
Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
-// TextLineReaderV2Attr is an optional argument to TextLineReaderV2.
-type TextLineReaderV2Attr func(optionalAttr)
+// StageSizeAttr is an optional argument to StageSize.
+type StageSizeAttr func(optionalAttr)
-// TextLineReaderV2SkipHeaderLines sets the optional skip_header_lines attribute to value.
-//
-// value: Number of lines to skip from the beginning of every file.
+// StageSizeCapacity sets the optional capacity attribute to value.
// If not specified, defaults to 0
-func TextLineReaderV2SkipHeaderLines(value int64) TextLineReaderV2Attr {
+//
+// REQUIRES: value >= 0
+func StageSizeCapacity(value int64) StageSizeAttr {
return func(m optionalAttr) {
- m["skip_header_lines"] = value
+ m["capacity"] = value
}
}
-// TextLineReaderV2Container sets the optional container attribute to value.
+// StageSizeMemoryLimit sets the optional memory_limit attribute to value.
+// If not specified, defaults to 0
//
-// value: If non-empty, this reader is placed in the given container.
-// Otherwise, a default container is used.
+// REQUIRES: value >= 0
+func StageSizeMemoryLimit(value int64) StageSizeAttr {
+ return func(m optionalAttr) {
+ m["memory_limit"] = value
+ }
+}
+
+// StageSizeContainer sets the optional container attribute to value.
// If not specified, defaults to ""
-func TextLineReaderV2Container(value string) TextLineReaderV2Attr {
+func StageSizeContainer(value string) StageSizeAttr {
return func(m optionalAttr) {
m["container"] = value
}
}
-// TextLineReaderV2SharedName sets the optional shared_name attribute to value.
-//
-// value: If non-empty, this reader is named in the given bucket
-// with this shared_name. Otherwise, the node name is used instead.
+// StageSizeSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
-func TextLineReaderV2SharedName(value string) TextLineReaderV2Attr {
+func StageSizeSharedName(value string) StageSizeAttr {
return func(m optionalAttr) {
m["shared_name"] = value
}
}
-// A Reader that outputs the lines of a file delimited by '\n'.
-//
-// Returns The handle to reference the Reader.
-func TextLineReaderV2(scope *Scope, optional ...TextLineReaderV2Attr) (reader_handle tf.Output) {
+// Op returns the number of elements in the underlying container.
+func StageSize(scope *Scope, dtypes []tf.DataType, optional ...StageSizeAttr) (size tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"dtypes": dtypes}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "TextLineReaderV2",
+ Type: "StageSize",
Attrs: attrs,
}
@@ -7652,134 +7767,152 @@ func TextLineReaderV2(scope *Scope, optional ...TextLineReaderV2Attr) (reader_ha
return op.Output(0)
}
-// Generate a glob pattern matching all sharded file names.
-func ShardedFilespec(scope *Scope, basename tf.Output, num_shards tf.Output) (filename tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "ShardedFilespec",
- Input: []tf.Input{
- basename, num_shards,
- },
+// NonMaxSuppressionAttr is an optional argument to NonMaxSuppression.
+type NonMaxSuppressionAttr func(optionalAttr)
+
+// NonMaxSuppressionIouThreshold sets the optional iou_threshold attribute to value.
+//
+// value: A float representing the threshold for deciding whether boxes
+// overlap too much with respect to IOU.
+// If not specified, defaults to 0.5
+func NonMaxSuppressionIouThreshold(value float32) NonMaxSuppressionAttr {
+ return func(m optionalAttr) {
+ m["iou_threshold"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// Delete the stack from its resource container.
+// Greedily selects a subset of bounding boxes in descending order of score,
+//
+// pruning away boxes that have high intersection-over-union (IOU) overlap
+// with previously selected boxes. Bounding boxes are supplied as
+// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
+// diagonal pair of box corners, and the coordinates can be provided either
+// normalized (i.e., lying in the interval [0, 1]) or absolute. Note that this
+// algorithm is agnostic to where the origin is in the coordinate system and,
+// more generally, is invariant to orthogonal transformations and translations
+// of it; thus translating or reflecting the coordinate system results in the
+// same boxes being selected by the algorithm.
+// The output of this operation is a set of integers indexing into the input
+// collection of bounding boxes representing the selected boxes. The bounding
+// box coordinates corresponding to the selected indices can then be obtained
+// using the `tf.gather` operation. For example:
+// selected_indices = tf.image.non_max_suppression(
+// boxes, scores, max_output_size, iou_threshold)
+// selected_boxes = tf.gather(boxes, selected_indices)
//
// Arguments:
-// handle: The handle to a stack.
+// boxes: A 2-D float tensor of shape `[num_boxes, 4]`.
+// scores: A 1-D float tensor of shape `[num_boxes]` representing a single
+// score corresponding to each box (each row of boxes).
+// max_output_size: A scalar integer tensor representing the maximum number of
+// boxes to be selected by non max suppression.
//
-// Returns the created operation.
-func StackCloseV2(scope *Scope, handle tf.Output) (o *tf.Operation) {
+// Returns A 1-D integer tensor of shape `[M]` representing the selected
+// indices from the boxes tensor, where `M <= max_output_size`.
+func NonMaxSuppression(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, optional ...NonMaxSuppressionAttr) (selected_indices tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "StackCloseV2",
+ Type: "NonMaxSuppression",
Input: []tf.Input{
- handle,
+ boxes, scores, max_output_size,
},
+ Attrs: attrs,
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// Generate a sharded filename. The filename is printf formatted as
-//
-// %s-%05d-of-%05d, basename, shard, num_shards.
-func ShardedFilename(scope *Scope, basename tf.Output, shard tf.Output, num_shards tf.Output) (filename tf.Output) {
+// Creates a dataset that emits `components` as a tuple of tensors once.
+func TensorDataset(scope *Scope, components []tf.Output, output_shapes []tf.Shape) (handle tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"output_shapes": output_shapes}
opspec := tf.OpSpec{
- Type: "ShardedFilename",
+ Type: "TensorDataset",
Input: []tf.Input{
- basename, shard, num_shards,
+ tf.OutputList(components),
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Saves input tensors slices to disk.
-//
-// This is like `Save` except that tensors can be listed in the saved file as being
-// a slice of a larger tensor. `shapes_and_slices` specifies the shape of the
-// larger tensor and the slice that this tensor covers. `shapes_and_slices` must
-// have as many elements as `tensor_names`.
-//
-// Elements of the `shapes_and_slices` input must either be:
-//
-// * The empty string, in which case the corresponding tensor is
-// saved normally.
-// * A string of the form `dim0 dim1 ... dimN-1 slice-spec` where the
-// `dimI` are the dimensions of the larger tensor and `slice-spec`
-// specifies what part is covered by the tensor to save.
-//
-// `slice-spec` itself is a `:`-separated list: `slice0:slice1:...:sliceN-1`
-// where each `sliceI` is either:
+// Component-wise multiplies a SparseTensor by a dense Tensor.
//
-// * The string `-` meaning that the slice covers all indices of this dimension
-// * `start,length` where `start` and `length` are integers. In that
-// case the slice covers `length` indices starting at `start`.
+// The output locations corresponding to the implicitly zero elements in the sparse
+// tensor will be zero (i.e., will not take up storage space), regardless of the
+// contents of the dense tensor (even if it is +/-INF, and despite the fact
+// that INF*0 == NaN).
//
-// See also `Save`.
+// *Limitation*: this Op only broadcasts the dense side to the sparse side, but not
+// the other direction.
//
// Arguments:
-// filename: Must have a single element. The name of the file to which we write the
-// tensor.
-// tensor_names: Shape `[N]`. The names of the tensors to be saved.
-// shapes_and_slices: Shape `[N]`. The shapes and slice specifications to use when
-// saving the tensors.
-// data: `N` tensors to save.
+// sp_indices: 2-D. `N x R` matrix with the indices of non-empty values in a
+// SparseTensor, possibly not in canonical ordering.
+// sp_values: 1-D. `N` non-empty values corresponding to `sp_indices`.
+// sp_shape: 1-D. Shape of the input SparseTensor.
+// dense: `R`-D. The dense Tensor operand.
//
-// Returns the created operation.
-func SaveSlices(scope *Scope, filename tf.Output, tensor_names tf.Output, shapes_and_slices tf.Output, data []tf.Output) (o *tf.Operation) {
+// Returns 1-D. The `N` values that are operated on.
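+//
+// A minimal Go sketch (assuming a `Scope` s, the three tensors spIndices,
+// spValues and spShape describing the SparseTensor, and a dense operand):
+//
+// ```go
+// out := SparseDenseCwiseMul(s, spIndices, spValues, spShape, dense)
+// // `out` replaces spValues; spIndices and spShape are reused unchanged.
+// ```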
+func SparseDenseCwiseMul(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output, dense tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "SaveSlices",
+ Type: "SparseDenseCwiseMul",
Input: []tf.Input{
- filename, tensor_names, shapes_and_slices, tf.OutputList(data),
+ sp_indices, sp_values, sp_shape, dense,
},
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// MergeV2CheckpointsAttr is an optional argument to MergeV2Checkpoints.
-type MergeV2CheckpointsAttr func(optionalAttr)
+// ResourceSparseApplyFtrlAttr is an optional argument to ResourceSparseApplyFtrl.
+type ResourceSparseApplyFtrlAttr func(optionalAttr)
-// MergeV2CheckpointsDeleteOldDirs sets the optional delete_old_dirs attribute to value.
+// ResourceSparseApplyFtrlUseLocking sets the optional use_locking attribute to value.
//
-// value: see above.
-// If not specified, defaults to true
-func MergeV2CheckpointsDeleteOldDirs(value bool) MergeV2CheckpointsAttr {
+// value: If `True`, updating of the var and accum tensors will be protected
+// by a lock; otherwise the behavior is undefined, but may exhibit less
+// contention.
+// If not specified, defaults to false
+func ResourceSparseApplyFtrlUseLocking(value bool) ResourceSparseApplyFtrlAttr {
return func(m optionalAttr) {
- m["delete_old_dirs"] = value
+ m["use_locking"] = value
}
}
-// V2 format specific: merges the metadata files of sharded checkpoints. The
-//
-// result is one logical checkpoint, with one physical metadata file and renamed
-// data files.
-//
-// Intended for "grouping" multiple checkpoints in a sharded checkpoint setup.
+// Update relevant entries in '*var' according to the Ftrl-proximal scheme.
//
-// If delete_old_dirs is true, attempts to delete recursively the dirname of each
-// path in the input checkpoint_prefixes. This is useful when those paths are non
-// user-facing temporary locations.
+// That is for rows we have grad for, we update var, accum and linear as follows:
+// accum_new = accum + grad * grad
+// linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
+// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
+// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
+// accum = accum_new
//
// Arguments:
-// checkpoint_prefixes: prefixes of V2 checkpoints to merge.
-// destination_prefix: scalar. The desired final prefix. Allowed to be the same
-// as one of the checkpoint_prefixes.
+// var_: Should be from a Variable().
+// accum: Should be from a Variable().
+// linear: Should be from a Variable().
+// grad: The gradient.
+// indices: A vector of indices into the first dimension of var and accum.
+// lr: Scaling factor. Must be a scalar.
+// l1: L1 regularization. Must be a scalar.
+// l2: L2 regularization. Must be a scalar.
+// lr_power: Scaling factor. Must be a scalar.
//
// Returns the created operation.
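+//
+// A minimal Go sketch (assuming a `Scope` s and handles varHandle, accum,
+// linear, grad and indices built elsewhere; the scalar values are illustrative):
+//
+// ```go
+// op := ResourceSparseApplyFtrl(s, varHandle, accum, linear, grad, indices,
+// 	Const(s, float32(0.01)), // lr
+// 	Const(s, float32(0.0)),  // l1
+// 	Const(s, float32(0.0)),  // l2
+// 	Const(s, float32(-0.5)), // lr_power
+// 	ResourceSparseApplyFtrlUseLocking(true))
+// ```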
-func MergeV2Checkpoints(scope *Scope, checkpoint_prefixes tf.Output, destination_prefix tf.Output, optional ...MergeV2CheckpointsAttr) (o *tf.Operation) {
+func ResourceSparseApplyFtrl(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, indices tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, lr_power tf.Output, optional ...ResourceSparseApplyFtrlAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
@@ -7788,49 +7921,75 @@ func MergeV2Checkpoints(scope *Scope, checkpoint_prefixes tf.Output, destination
a(attrs)
}
opspec := tf.OpSpec{
- Type: "MergeV2Checkpoints",
+ Type: "ResourceSparseApplyFtrl",
Input: []tf.Input{
- checkpoint_prefixes, destination_prefix,
+ var_, accum, linear, grad, indices, lr, l1, l2, lr_power,
},
Attrs: attrs,
}
return scope.AddOperation(opspec)
}
-// QueueEnqueueManyV2Attr is an optional argument to QueueEnqueueManyV2.
-type QueueEnqueueManyV2Attr func(optionalAttr)
+// Returns which elements of x are Inf.
+//
+// @compatibility(numpy)
+// Equivalent to np.isinf
+// @end_compatibility
+func IsInf(scope *Scope, x tf.Output) (y tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "IsInf",
+ Input: []tf.Input{
+ x,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
-// QueueEnqueueManyV2TimeoutMs sets the optional timeout_ms attribute to value.
+// ResourceSparseApplyRMSPropAttr is an optional argument to ResourceSparseApplyRMSProp.
+type ResourceSparseApplyRMSPropAttr func(optionalAttr)
+
+// ResourceSparseApplyRMSPropUseLocking sets the optional use_locking attribute to value.
//
-// value: If the queue is too full, this operation will block for up
-// to timeout_ms milliseconds.
-// Note: This option is not supported yet.
-// If not specified, defaults to -1
-func QueueEnqueueManyV2TimeoutMs(value int64) QueueEnqueueManyV2Attr {
+// value: If `True`, updating of the var, ms, and mom tensors is protected
+// by a lock; otherwise the behavior is undefined, but may exhibit less
+// contention.
+// If not specified, defaults to false
+func ResourceSparseApplyRMSPropUseLocking(value bool) ResourceSparseApplyRMSPropAttr {
return func(m optionalAttr) {
- m["timeout_ms"] = value
+ m["use_locking"] = value
}
}
-// Enqueues zero or more tuples of one or more tensors in the given queue.
+// Update '*var' according to the RMSProp algorithm.
//
-// This operation slices each component tensor along the 0th dimension to
-// make multiple queue elements. All of the tuple components must have the
-// same size in the 0th dimension.
+// Note that in dense implementation of this algorithm, ms and mom will
+// update even if the grad is zero, but in this sparse implementation, ms
+// and mom will not update in iterations during which the grad is zero.
//
-// The components input has k elements, which correspond to the components of
-// tuples stored in the given queue.
+// mean_square = decay * mean_square + (1-decay) * gradient ** 2
+// Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
//
-// N.B. If the queue is full, this operation will block until the given
-// elements have been enqueued (or 'timeout_ms' elapses, if specified).
+// ms <- rho * ms_{t-1} + (1-rho) * grad * grad
+// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
+// var <- var - mom
//
// Arguments:
-// handle: The handle to a queue.
-// components: One or more tensors from which the enqueued tensors should
-// be taken.
+// var_: Should be from a Variable().
+// ms: Should be from a Variable().
+// mom: Should be from a Variable().
+// lr: Scaling factor. Must be a scalar.
+// rho: Decay rate. Must be a scalar.
+// momentum: Momentum. Must be a scalar.
+// epsilon: Ridge term. Must be a scalar.
+// grad: The gradient.
+// indices: A vector of indices into the first dimension of var, ms and mom.
//
// Returns the created operation.
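+//
+// A minimal Go sketch (assuming a `Scope` s and handles varHandle, ms, mom,
+// grad and indices built elsewhere; the scalars mirror the formula above):
+//
+// ```go
+// op := ResourceSparseApplyRMSProp(s, varHandle, ms, mom,
+// 	Const(s, float32(0.001)), // lr
+// 	Const(s, float32(0.9)),   // rho (decay)
+// 	Const(s, float32(0.0)),   // momentum
+// 	Const(s, float32(1e-10)), // epsilon
+// 	grad, indices)
+// ```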
-func QueueEnqueueManyV2(scope *Scope, handle tf.Output, components []tf.Output, optional ...QueueEnqueueManyV2Attr) (o *tf.Operation) {
+func ResourceSparseApplyRMSProp(scope *Scope, var_ tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyRMSPropAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
@@ -7839,66 +7998,168 @@ func QueueEnqueueManyV2(scope *Scope, handle tf.Output, components []tf.Output,
a(attrs)
}
opspec := tf.OpSpec{
- Type: "QueueEnqueueManyV2",
+ Type: "ResourceSparseApplyRMSProp",
Input: []tf.Input{
- handle, tf.OutputList(components),
+ var_, ms, mom, lr, rho, momentum, epsilon, grad, indices,
},
Attrs: attrs,
}
return scope.AddOperation(opspec)
}
-// SvdAttr is an optional argument to Svd.
-type SvdAttr func(optionalAttr)
+// Returns the truth value of (x > y) element-wise.
+//
+// *NOTE*: `Greater` supports broadcasting. More about broadcasting
+// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+func Greater(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "Greater",
+ Input: []tf.Input{
+ x, y,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
-// SvdComputeUv sets the optional compute_uv attribute to value.
+// SampleDistortedBoundingBoxAttr is an optional argument to SampleDistortedBoundingBox.
+type SampleDistortedBoundingBoxAttr func(optionalAttr)
+
+// SampleDistortedBoundingBoxSeed sets the optional seed attribute to value.
//
-// value: If true, left and right singular vectors will be
-// computed and returned in `u` and `v`, respectively.
-// If false, `u` and `v` are not set and should never referenced.
-// If not specified, defaults to true
-func SvdComputeUv(value bool) SvdAttr {
+// value: If either `seed` or `seed2` are set to non-zero, the random number
+// generator is seeded by the given `seed`. Otherwise, it is seeded by a random
+// seed.
+// If not specified, defaults to 0
+func SampleDistortedBoundingBoxSeed(value int64) SampleDistortedBoundingBoxAttr {
return func(m optionalAttr) {
- m["compute_uv"] = value
+ m["seed"] = value
}
}
-// SvdFullMatrices sets the optional full_matrices attribute to value.
+// SampleDistortedBoundingBoxSeed2 sets the optional seed2 attribute to value.
//
-// value: If true, compute full-sized `u` and `v`. If false
-// (the default), compute only the leading `P` singular vectors.
-// Ignored if `compute_uv` is `False`.
+// value: A second seed to avoid seed collision.
+// If not specified, defaults to 0
+func SampleDistortedBoundingBoxSeed2(value int64) SampleDistortedBoundingBoxAttr {
+ return func(m optionalAttr) {
+ m["seed2"] = value
+ }
+}
+
+// SampleDistortedBoundingBoxMinObjectCovered sets the optional min_object_covered attribute to value.
+//
+// value: The cropped area of the image must contain at least this
+// fraction of any bounding box supplied. The value of this parameter should be
+// non-negative. In the case of 0, the cropped area does not need to overlap
+// any of the bounding boxes supplied.
+// If not specified, defaults to 0.1
+func SampleDistortedBoundingBoxMinObjectCovered(value float32) SampleDistortedBoundingBoxAttr {
+ return func(m optionalAttr) {
+ m["min_object_covered"] = value
+ }
+}
+
+// SampleDistortedBoundingBoxAspectRatioRange sets the optional aspect_ratio_range attribute to value.
+//
+// value: The cropped area of the image must have an aspect ratio =
+// width / height within this range.
+// If not specified, defaults to <f:0.75 f:1.33 >
+func SampleDistortedBoundingBoxAspectRatioRange(value []float32) SampleDistortedBoundingBoxAttr {
+ return func(m optionalAttr) {
+ m["aspect_ratio_range"] = value
+ }
+}
+
+// SampleDistortedBoundingBoxAreaRange sets the optional area_range attribute to value.
+//
+// value: The cropped area of the image must contain a fraction of the
+// supplied image within this range.
+// If not specified, defaults to <f:0.05 f:1 >
+func SampleDistortedBoundingBoxAreaRange(value []float32) SampleDistortedBoundingBoxAttr {
+ return func(m optionalAttr) {
+ m["area_range"] = value
+ }
+}
+
+// SampleDistortedBoundingBoxMaxAttempts sets the optional max_attempts attribute to value.
+//
+// value: Number of attempts at generating a cropped region of the image that
+// satisfies the specified constraints. After `max_attempts` failures, the
+// entire image is returned.
+// If not specified, defaults to 100
+func SampleDistortedBoundingBoxMaxAttempts(value int64) SampleDistortedBoundingBoxAttr {
+ return func(m optionalAttr) {
+ m["max_attempts"] = value
+ }
+}
+
+// SampleDistortedBoundingBoxUseImageIfNoBoundingBoxes sets the optional use_image_if_no_bounding_boxes attribute to value.
+//
+// value: Controls behavior if no bounding boxes are supplied.
+// If true, assume an implicit bounding box covering the whole input. If false,
+// raise an error.
// If not specified, defaults to false
-func SvdFullMatrices(value bool) SvdAttr {
+func SampleDistortedBoundingBoxUseImageIfNoBoundingBoxes(value bool) SampleDistortedBoundingBoxAttr {
return func(m optionalAttr) {
- m["full_matrices"] = value
+ m["use_image_if_no_bounding_boxes"] = value
}
}
-// Computes the singular value decompositions of one or more matrices.
+// Generate a single randomly distorted bounding box for an image.
//
-// Computes the SVD of each inner matrix in `input` such that
-// `input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])`
+// Bounding box annotations are often supplied in addition to ground-truth labels
+// in image recognition or object localization tasks. A common technique for
+// training such a system is to randomly distort an image while preserving
+// its content, i.e. *data augmentation*. This Op outputs a randomly distorted
+// localization of an object, i.e. bounding box, given an `image_size`,
+// `bounding_boxes` and a series of constraints.
+//
+// The output of this Op is a single bounding box that may be used to crop the
+// original image. The output is returned as 3 tensors: `begin`, `size` and
+// `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the
+// image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize
+// what the bounding box looks like.
+//
+// Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The
+// bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
+// height of the underlying image.
+//
+// For example,
//
// ```python
-// # a is a tensor containing a batch of matrices.
-// # s is a tensor of singular values for each matrix.
-// # u is the tensor containing of left singular vectors for each matrix.
-// # v is the tensor containing of right singular vectors for each matrix.
-// s, u, v = svd(a)
-// s, _, _ = svd(a, compute_uv=False)
+// # Generate a single distorted bounding box.
+// begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
+// tf.shape(image),
+// bounding_boxes=bounding_boxes)
+//
+// # Draw the bounding box in an image summary.
+// image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
+// bbox_for_draw)
+// tf.summary.image('images_with_box', image_with_box)
+//
+// # Employ the bounding box to distort the image.
+// distorted_image = tf.slice(image, begin, size)
// ```
//
+// Note that if no bounding box information is available, setting
+// `use_image_if_no_bounding_boxes = true` will assume there is a single implicit
+// bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is
+// false and no bounding boxes are supplied, an error is raised.
+//
// Arguments:
-// input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
-// form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.
+// image_size: 1-D, containing `[height, width, channels]`.
+// bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes
+// associated with the image.
//
-// Returns Singular values. Shape is `[..., P]`.Left singular vectors. If `full_matrices` is `False` then shape is
-// `[..., M, P]`; if `full_matrices` is `True` then shape is
-// `[..., M, M]`. Undefined if `compute_uv` is `False`.Left singular vectors. If `full_matrices` is `False` then shape is
-// `[..., N, P]`. If `full_matrices` is `True` then shape is `[..., N, N]`.
-// Undefined if `compute_uv` is false.
-func Svd(scope *Scope, input tf.Output, optional ...SvdAttr) (s tf.Output, u tf.Output, v tf.Output) {
+// Returns 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to
+// `tf.slice`. 1-D, containing `[target_height, target_width, -1]`. Provide as input
+// to `tf.slice`. 3-D with shape `[1, 1, 4]` containing the distorted bounding box.
+// Provide as input to `tf.image.draw_bounding_boxes`.
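+//
+// The Python example above translates to a Go sketch like this (assuming a
+// `Scope` s and `tf.Output` values image and boundingBoxes built elsewhere;
+// Shape and Slice are the corresponding generated wrappers):
+//
+// ```go
+// begin, size, _ := SampleDistortedBoundingBox(s, Shape(s, image), boundingBoxes,
+// 	SampleDistortedBoundingBoxMinObjectCovered(0.1))
+// distorted := Slice(s, image, begin, size)
+// ```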
+func SampleDistortedBoundingBox(scope *Scope, image_size tf.Output, bounding_boxes tf.Output, optional ...SampleDistortedBoundingBoxAttr) (begin tf.Output, size tf.Output, bboxes tf.Output) {
if scope.Err() != nil {
return
}
@@ -7907,9 +8168,9 @@ func Svd(scope *Scope, input tf.Output, optional ...SvdAttr) (s tf.Output, u tf.
a(attrs)
}
opspec := tf.OpSpec{
- Type: "Svd",
+ Type: "SampleDistortedBoundingBox",
Input: []tf.Input{
- input,
+ image_size, bounding_boxes,
},
Attrs: attrs,
}
@@ -7917,171 +8178,157 @@ func Svd(scope *Scope, input tf.Output, optional ...SvdAttr) (s tf.Output, u tf.
return op.Output(0), op.Output(1), op.Output(2)
}
-// Converts one or more images from RGB to HSV.
-//
-// Outputs a tensor of the same shape as the `images` tensor, containing the HSV
-// value of the pixels. The output is only well defined if the value in `images`
-// are in `[0,1]`.
-//
-// `output[..., 0]` contains hue, `output[..., 1]` contains saturation, and
-// `output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0
-// corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue.
+// Returns x / y element-wise for integer types.
//
-// Arguments:
-// images: 1-D or higher rank. RGB data to convert. Last dimension must be size 3.
+// Truncation designates that negative numbers will round fractional quantities
+// toward zero, i.e. -7 / 5 = -1. This matches C semantics, but it is different
+// from Python semantics. See `FloorDiv` for a division function that matches
+// Python semantics.
//
-// Returns `images` converted to HSV.
-func RGBToHSV(scope *Scope, images tf.Output) (output tf.Output) {
+// *NOTE*: `TruncateDiv` supports broadcasting. More about broadcasting
+// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
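+//
+// A minimal Go sketch (assuming a `Scope` s):
+//
+// ```go
+// z := TruncateDiv(s, Const(s, int32(-7)), Const(s, int32(5)))
+// // When run, z evaluates to -1: the fractional part is discarded.
+// ```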
+func TruncateDiv(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "RGBToHSV",
+ Type: "TruncateDiv",
Input: []tf.Input{
- images,
+ x, y,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// MatrixSolveLsAttr is an optional argument to MatrixSolveLs.
-type MatrixSolveLsAttr func(optionalAttr)
-
-// MatrixSolveLsFast sets the optional fast attribute to value.
-// If not specified, defaults to true
-func MatrixSolveLsFast(value bool) MatrixSolveLsAttr {
- return func(m optionalAttr) {
- m["fast"] = value
- }
-}
-
-// Solves one or more linear least-squares problems.
-//
-// `matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions
-// form real or complex matrices of size `[M, N]`. `Rhs` is a tensor of the same
-// type as `matrix` and shape `[..., M, K]`.
-// The output is a tensor shape `[..., N, K]` where each output matrix solves
-// each of the equations
-// `matrix[..., :, :]` * `output[..., :, :]` = `rhs[..., :, :]`
-// in the least squares sense.
-//
-// We use the following notation for (complex) matrix and right-hand sides
-// in the batch:
+// Restores tensors from a V2 checkpoint.
//
-// `matrix`=\\(A \in \mathbb{C}^{m \times n}\\),
-// `rhs`=\\(B \in \mathbb{C}^{m \times k}\\),
-// `output`=\\(X \in \mathbb{C}^{n \times k}\\),
-// `l2_regularizer`=\\(\lambda \in \mathbb{R}\\).
+// For backward compatibility with the V1 format, this Op currently allows
+// restoring from a V1 checkpoint as well:
+// - This Op first attempts to find the V2 index file pointed to by "prefix", and
+// if found, proceeds to read it as a V2 checkpoint;
+// - Otherwise the V1 read path is invoked.
+// Relying on this behavior is not recommended, as the ability to fall back to read
+// V1 might be deprecated and eventually removed.
//
-// If `fast` is `True`, then the solution is computed by solving the normal
-// equations using Cholesky decomposition. Specifically, if \\(m \ge n\\) then
-// \\(X = (A^H A + \lambda I)^{-1} A^H B\\), which solves the least-squares
-// problem \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k} } ||A Z - B||_F^2 +
-// \lambda ||Z||_F^2\\). If \\(m \lt n\\) then `output` is computed as
-// \\(X = A^H (A A^H + \lambda I)^{-1} B\\), which (for \\(\lambda = 0\\)) is the
-// minimum-norm solution to the under-determined linear system, i.e.
-// \\(X = \mathrm{argmin}_{Z \in \mathbb{C}^{n \times k} } ||Z||_F^2 \\),
-// subject to \\(A Z = B\\). Notice that the fast path is only numerically stable
-// when \\(A\\) is numerically full rank and has a condition number
-// \\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach} } }\\) or\\(\lambda\\) is
-// sufficiently large.
+// By default, restores the named tensors in full. If the caller wishes to restore
+// specific slices of stored tensors, "shape_and_slices" should be non-empty
+// strings and correspondingly well-formed.
//
-// If `fast` is `False` an algorithm based on the numerically robust complete
-// orthogonal decomposition is used. This computes the minimum-norm
-// least-squares solution, even when \\(A\\) is rank deficient. This path is
-// typically 6-7 times slower than the fast path. If `fast` is `False` then
-// `l2_regularizer` is ignored.
+// Callers must ensure all the named tensors are indeed stored in the checkpoint.
//
// Arguments:
-// matrix: Shape is `[..., M, N]`.
-// rhs: Shape is `[..., M, K]`.
-// l2_regularizer: Scalar tensor.
-//
-// @compatibility(numpy)
-// Equivalent to np.linalg.lstsq
-// @end_compatibility
+// prefix: Must have a single element. The prefix of a V2 checkpoint.
+// tensor_names: shape {N}. The names of the tensors to be restored.
+// shape_and_slices: shape {N}. The slice specs of the tensors to be restored.
+// Empty strings indicate that they are non-partitioned tensors.
+// dtypes: shape {N}. The list of expected dtype for the tensors. Must match
+// those stored in the checkpoint.
//
-// Returns Shape is `[..., N, K]`.
-func MatrixSolveLs(scope *Scope, matrix tf.Output, rhs tf.Output, l2_regularizer tf.Output, optional ...MatrixSolveLsAttr) (output tf.Output) {
+// Returns shape {N}. The restored tensors, whose shapes are read from the
+// checkpoint directly.
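+//
+// A minimal Go sketch (assuming a `Scope` s; the prefix and tensor names are
+// illustrative):
+//
+// ```go
+// tensors := RestoreV2(s,
+// 	Const(s, "ckpt/model"),            // prefix
+// 	Const(s, []string{"w", "b"}),      // tensor_names
+// 	Const(s, []string{"", ""}),        // restore both tensors in full
+// 	[]tf.DataType{tf.Float, tf.Float}) // dtypes
+// ```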
+func RestoreV2(scope *Scope, prefix tf.Output, tensor_names tf.Output, shape_and_slices tf.Output, dtypes []tf.DataType) (tensors []tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
+ attrs := map[string]interface{}{"dtypes": dtypes}
opspec := tf.OpSpec{
- Type: "MatrixSolveLs",
+ Type: "RestoreV2",
Input: []tf.Input{
- matrix, rhs, l2_regularizer,
+ prefix, tensor_names, shape_and_slices,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ if scope.Err() != nil {
+ return
+ }
+ var idx int
+ var err error
+ if tensors, idx, err = makeOutputList(op, idx, "tensors"); err != nil {
+ scope.UpdateErr("RestoreV2", err)
+ return
+ }
+ return tensors
}
-// Adjust the saturation of one or more images.
+// Decode web-safe base64-encoded strings.
//
-// `images` is a tensor of at least 3 dimensions. The last dimension is
-// interpretted as channels, and must be three.
+// Input may or may not have padding at the end. See EncodeBase64 for padding.
+// Web-safe means that input must use - and _ instead of + and /.
//
-// The input image is considered in the RGB colorspace. Conceptually, the RGB
-// colors are first mapped into HSV. A scale is then applied all the saturation
-// values, and then remapped back to RGB colorspace.
+// Arguments:
+// input: Base64 strings to decode.
+//
+// Returns Decoded strings.
+func DecodeBase64(scope *Scope, input tf.Output) (output tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "DecodeBase64",
+ Input: []tf.Input{
+ input,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
+// Store the input tensor in the state of the current session.
//
// Arguments:
-// images: Images to adjust. At least 3-D.
-// scale: A float scale to add to the saturation.
+// value: The tensor to be stored.
//
-// Returns The hue-adjusted image or images.
-func AdjustSaturation(scope *Scope, images tf.Output, scale tf.Output) (output tf.Output) {
+// Returns The handle for the tensor stored in the session state, represented
+// as a string.
+func GetSessionHandle(scope *Scope, value tf.Output) (handle tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "AdjustSaturation",
+ Type: "GetSessionHandle",
Input: []tf.Input{
- images, scale,
+ value,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// SelfAdjointEigV2Attr is an optional argument to SelfAdjointEigV2.
-type SelfAdjointEigV2Attr func(optionalAttr)
+// ResourceSparseApplyProximalAdagradAttr is an optional argument to ResourceSparseApplyProximalAdagrad.
+type ResourceSparseApplyProximalAdagradAttr func(optionalAttr)
-// SelfAdjointEigV2ComputeV sets the optional compute_v attribute to value.
+// ResourceSparseApplyProximalAdagradUseLocking sets the optional use_locking attribute to value.
//
-// value: If `True` then eigenvectors will be computed and returned in `v`.
-// Otherwise, only the eigenvalues will be computed.
-// If not specified, defaults to true
-func SelfAdjointEigV2ComputeV(value bool) SelfAdjointEigV2Attr {
+// value: If True, updating of the var and accum tensors will be protected by
+// a lock; otherwise the behavior is undefined, but may exhibit less contention.
+// If not specified, defaults to false
+func ResourceSparseApplyProximalAdagradUseLocking(value bool) ResourceSparseApplyProximalAdagradAttr {
return func(m optionalAttr) {
- m["compute_v"] = value
+ m["use_locking"] = value
}
}
-// Computes the eigen decomposition of one or more square self-adjoint matrices.
-//
-// Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in
-// `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`.
+// Sparse update entries in '*var' and '*accum' according to FOBOS algorithm.
//
-// ```python
-// # a is a tensor.
-// # e is a tensor of eigenvalues.
-// # v is a tensor of eigenvectors.
-// e, v = self_adjoint_eig(a)
-// e = self_adjoint_eig(a, compute_v=False)
-// ```
+// That is for rows we have grad for, we update var and accum as follows:
+// accum += grad * grad
+// prox_v = var
+// prox_v -= lr * grad * (1 / sqrt(accum))
+// var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
//
// Arguments:
-// input: `Tensor` input of shape `[N, N]`.
+// var_: Should be from a Variable().
+// accum: Should be from a Variable().
+// lr: Learning rate. Must be a scalar.
+// l1: L1 regularization. Must be a scalar.
+// l2: L2 regularization. Must be a scalar.
+// grad: The gradient.
+// indices: A vector of indices into the first dimension of var and accum.
//
-// Returns Eigenvalues. Shape is `[N]`.Eigenvectors. Shape is `[N, N]`.
-func SelfAdjointEigV2(scope *Scope, input tf.Output, optional ...SelfAdjointEigV2Attr) (e tf.Output, v tf.Output) {
+// Returns the created operation.
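+//
+// A minimal Go sketch (assuming a `Scope` s and handles varHandle, accum,
+// grad and indices built elsewhere; the scalar values are illustrative):
+//
+// ```go
+// op := ResourceSparseApplyProximalAdagrad(s, varHandle, accum,
+// 	Const(s, float32(0.01)), // lr
+// 	Const(s, float32(0.0)),  // l1
+// 	Const(s, float32(0.0)),  // l2
+// 	grad, indices)
+// ```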
+func ResourceSparseApplyProximalAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyProximalAdagradAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
@@ -8090,201 +8337,157 @@ func SelfAdjointEigV2(scope *Scope, input tf.Output, optional ...SelfAdjointEigV
a(attrs)
}
opspec := tf.OpSpec{
- Type: "SelfAdjointEigV2",
+ Type: "ResourceSparseApplyProximalAdagrad",
Input: []tf.Input{
- input,
+ var_, accum, lr, l1, l2, grad, indices,
},
Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1)
+ return scope.AddOperation(opspec)
}
-// Computes the Eigen Decomposition of a batch of square self-adjoint matrices.
-//
-// DEPRECATED at GraphDef version 11: Use SelfAdjointEigV2 instead.
-//
-// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
-// form square matrices, with the same constraints as the single matrix
-// SelfAdjointEig.
-//
-// The result is a [..., M+1, M] matrix with [..., 0,:] containing the
-// eigenvalues, and subsequent [...,1:, :] containing the eigenvectors.
-//
-// Arguments:
-// input: Shape is `[..., M, M]`.
-//
-// Returns Shape is `[..., M+1, M]`.
-func SelfAdjointEig(scope *Scope, input tf.Output) (output tf.Output) {
+// Returns element-wise largest integer not greater than x.
+func Floor(scope *Scope, x tf.Output) (y tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "SelfAdjointEig",
+ Type: "Floor",
Input: []tf.Input{
- input,
+ x,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Writes contents to the file at input filename. Creates file and recursively
-//
-// creates directory if not existing.
-//
-// Arguments:
-// filename: scalar. The name of the file to which we write the contents.
-// contents: scalar. The content to be written to the output file.
-//
-// Returns the created operation.
-func WriteFile(scope *Scope, filename tf.Output, contents tf.Output) (o *tf.Operation) {
+// Computes the Gauss error function of `x` element-wise.
+func Erf(scope *Scope, x tf.Output) (y tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "WriteFile",
+ Type: "Erf",
Input: []tf.Input{
- filename, contents,
+ x,
},
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// Computes the Cholesky decomposition of one or more square matrices.
-//
-// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
-// form square matrices.
-//
-// The input has to be symmetric and positive definite. Only the lower-triangular
-// part of the input will be used for this operation. The upper-triangular part
-// will not be read.
+// Reads the value of a variable.
//
-// The output is a tensor of the same shape as the input
-// containing the Cholesky decompositions for all input submatrices `[..., :, :]`.
+// The tensor returned by this operation is immutable.
//
-// **Note**: The gradient computation on GPU is faster for large matrices but
-// not for large batch dimensions when the submatrices are small. In this
-// case it might be faster to use the CPU.
+// The value returned by this operation is guaranteed to be influenced by all the
+// writes on which this operation depends directly or indirectly, and to not be
+// influenced by any of the writes which depend directly or indirectly on this
+// operation.
//
// Arguments:
-// input: Shape is `[..., M, M]`.
-//
-// Returns Shape is `[..., M, M]`.
-func Cholesky(scope *Scope, input tf.Output) (output tf.Output) {
+// resource: handle to the resource in which to store the variable.
+// dtype: the dtype of the value.
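+//
+// A minimal Go sketch (assuming a `Scope` s and a resource handle produced by
+// a VarHandleOp holding a float variable):
+//
+// ```go
+// value := ReadVariableOp(s, handle, tf.Float)
+// ```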
+func ReadVariableOp(scope *Scope, resource tf.Output, dtype tf.DataType) (value tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"dtype": dtype}
opspec := tf.OpSpec{
- Type: "Cholesky",
+ Type: "ReadVariableOp",
Input: []tf.Input{
- input,
+ resource,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Computes the matrix exponential of one or more square matrices:
-//
-// exp(A) = \sum_{n=0}^\infty A^n/n!
-//
-// The exponential is computed using a combination of the scaling and squaring
-// method and the Pade approximation. Details can be founds in:
-// Nicholas J. Higham, "The scaling and squaring method for the matrix exponential
-// revisited," SIAM J. Matrix Anal. Applic., 26:1179-1193, 2005.
-//
-// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
-// form square matrices. The output is a tensor of the same shape as the input
-// containing the exponential for all input submatrices `[..., :, :]`.
-//
-// Arguments:
-// input: Shape is `[..., M, M]`.
-//
-// Returns Shape is `[..., M, M]`.
+// MaxPool3DGradAttr is an optional argument to MaxPool3DGrad.
+type MaxPool3DGradAttr func(optionalAttr)
+
+// MaxPool3DGradDataFormat sets the optional data_format attribute to value.
//
-// @compatibility(scipy)
-// Equivalent to scipy.linalg.expm
-// @end_compatibility
-func MatrixExponential(scope *Scope, input tf.Output) (output tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "MatrixExponential",
- Input: []tf.Input{
- input,
- },
+// value: The data format of the input and output data. With the
+// default format "NDHWC", the data is stored in the order of:
+// [batch, in_depth, in_height, in_width, in_channels].
+// Alternatively, the format could be "NCDHW", the data storage order is:
+// [batch, in_channels, in_depth, in_height, in_width].
+// If not specified, defaults to "NDHWC"
+func MaxPool3DGradDataFormat(value string) MaxPool3DGradAttr {
+ return func(m optionalAttr) {
+ m["data_format"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// Merges summaries.
-//
-// This op creates a
-// [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
-// protocol buffer that contains the union of all the values in the input
-// summaries.
-//
-// When the Op is run, it reports an `InvalidArgument` error if multiple values
-// in the summaries to merge use the same tag.
+// Computes gradients of max pooling function.
//
// Arguments:
-// inputs: Can be of any shape. Each must contain serialized `Summary` protocol
-// buffers.
-//
-// Returns Scalar. Serialized `Summary` protocol buffer.
-func MergeSummary(scope *Scope, inputs []tf.Output) (summary tf.Output) {
+// orig_input: The original input tensor.
+// orig_output: The original output tensor.
+// grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.
+// ksize: 1-D tensor of length 5. The size of the window for each dimension of
+// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
+// strides: 1-D tensor of length 5. The stride of the sliding window for each
+// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
+// padding: The type of padding algorithm to use.
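+//
+// A minimal Go sketch (assuming a `Scope` s and the forward-pass tensors
+// origInput, origOutput and grad built elsewhere):
+//
+// ```go
+// out := MaxPool3DGrad(s, origInput, origOutput, grad,
+// 	[]int64{1, 2, 2, 2, 1}, // ksize
+// 	[]int64{1, 2, 2, 2, 1}, // strides
+// 	"VALID")
+// ```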
+func MaxPool3DGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPool3DGradAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "MergeSummary",
+ Type: "MaxPool3DGrad",
Input: []tf.Input{
- tf.OutputList(inputs),
+ orig_input, orig_output, grad,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// AudioSummaryV2Attr is an optional argument to AudioSummaryV2.
-type AudioSummaryV2Attr func(optionalAttr)
+// SparseReduceSumAttr is an optional argument to SparseReduceSum.
+type SparseReduceSumAttr func(optionalAttr)
-// AudioSummaryV2MaxOutputs sets the optional max_outputs attribute to value.
-//
-// value: Max number of batch elements to generate audio for.
-// If not specified, defaults to 3
+// SparseReduceSumKeepDims sets the optional keep_dims attribute to value.
//
-// REQUIRES: value >= 1
-func AudioSummaryV2MaxOutputs(value int64) AudioSummaryV2Attr {
+// value: If true, retain reduced dimensions with length 1.
+// If not specified, defaults to false
+func SparseReduceSumKeepDims(value bool) SparseReduceSumAttr {
return func(m optionalAttr) {
- m["max_outputs"] = value
+ m["keep_dims"] = value
}
}
-// Outputs a `Summary` protocol buffer with audio.
+// Computes the sum of elements across dimensions of a SparseTensor.
//
-// The summary has up to `max_outputs` summary values containing audio. The
-// audio is built from `tensor` which must be 3-D with shape `[batch_size,
-// frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
-// assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.
+// This Op takes a SparseTensor and is the sparse counterpart to
+// `tf.reduce_sum()`. In particular, this Op also returns a dense `Tensor`
+// instead of a sparse one.
//
-// The `tag` argument is a scalar `Tensor` of type `string`. It is used to
-// build the `tag` of the summary values:
+// Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
+// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+// `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
+// with length 1.
//
-// * If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
-// * If `max_outputs` is greater than 1, the summary value tags are
-// generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
+// If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
+// with a single element is returned. Additionally, the axes can be negative,
+// which are interpreted according to the indexing rules in Python.
//
// Arguments:
-// tag: Scalar. Used to build the `tag` attribute of the summary values.
-// tensor: 2-D of shape `[batch_size, frames]`.
-// sample_rate: The sample rate of the signal in hertz.
+// input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a
+// SparseTensor, possibly not in canonical ordering.
+// input_values: 1-D. `N` non-empty values corresponding to `input_indices`.
+// input_shape: 1-D. Shape of the input SparseTensor.
+// reduction_axes: 1-D. Length-`K` vector containing the reduction axes.
//
-// Returns Scalar. Serialized `Summary` protocol buffer.
-func AudioSummaryV2(scope *Scope, tag tf.Output, tensor tf.Output, sample_rate tf.Output, optional ...AudioSummaryV2Attr) (summary tf.Output) {
+// Returns `R-K`-D. The reduced Tensor.
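+//
+// A minimal Go sketch (assuming a `Scope` s and the three tensors describing
+// the input SparseTensor built elsewhere):
+//
+// ```go
+// sum := SparseReduceSum(s, inputIndices, inputValues, inputShape,
+// 	Const(s, []int32{0}), SparseReduceSumKeepDims(true))
+// ```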
+func SparseReduceSum(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output, reduction_axes tf.Output, optional ...SparseReduceSumAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
@@ -8293,9 +8496,9 @@ func AudioSummaryV2(scope *Scope, tag tf.Output, tensor tf.Output, sample_rate t
a(attrs)
}
opspec := tf.OpSpec{
- Type: "AudioSummaryV2",
+ Type: "SparseReduceSum",
Input: []tf.Input{
- tag, tensor, sample_rate,
+ input_indices, input_values, input_shape, reduction_axes,
},
Attrs: attrs,
}
@@ -8303,74 +8506,34 @@ func AudioSummaryV2(scope *Scope, tag tf.Output, tensor tf.Output, sample_rate t
return op.Output(0)
}
-// ImageSummaryAttr is an optional argument to ImageSummary.
-type ImageSummaryAttr func(optionalAttr)
-
-// ImageSummaryMaxImages sets the optional max_images attribute to value.
-//
-// value: Max number of batch elements to generate images for.
-// If not specified, defaults to 3
-//
-// REQUIRES: value >= 1
-func ImageSummaryMaxImages(value int64) ImageSummaryAttr {
- return func(m optionalAttr) {
- m["max_images"] = value
- }
-}
+// ResourceApplyAdagradAttr is an optional argument to ResourceApplyAdagrad.
+type ResourceApplyAdagradAttr func(optionalAttr)
-// ImageSummaryBadColor sets the optional bad_color attribute to value.
+// ResourceApplyAdagradUseLocking sets the optional use_locking attribute to value.
//
-// value: Color to use for pixels with non-finite values.
-// If not specified, defaults to <dtype:DT_UINT8 tensor_shape:<dim:<size:4 > > int_val:255 int_val:0 int_val:0 int_val:255 >
-func ImageSummaryBadColor(value tf.Tensor) ImageSummaryAttr {
+// value: If `True`, updating of the var and accum tensors will be protected
+// by a lock; otherwise the behavior is undefined, but may exhibit less
+// contention.
+// If not specified, defaults to false
+func ResourceApplyAdagradUseLocking(value bool) ResourceApplyAdagradAttr {
return func(m optionalAttr) {
- m["bad_color"] = value
+ m["use_locking"] = value
}
}
-// Outputs a `Summary` protocol buffer with images.
-//
-// The summary has up to `max_images` summary values containing images. The
-// images are built from `tensor` which must be 4-D with shape `[batch_size,
-// height, width, channels]` and where `channels` can be:
-//
-// * 1: `tensor` is interpreted as Grayscale.
-// * 3: `tensor` is interpreted as RGB.
-// * 4: `tensor` is interpreted as RGBA.
-//
-// The images have the same number of channels as the input tensor. For float
-// input, the values are normalized one image at a time to fit in the range
-// `[0, 255]`. `uint8` values are unchanged. The op uses two different
-// normalization algorithms:
-//
-// * If the input values are all positive, they are rescaled so the largest one
-// is 255.
-//
-// * If any input value is negative, the values are shifted so input value 0.0
-// is at 127. They are then rescaled so that either the smallest value is 0,
-// or the largest one is 255.
-//
-// The `tag` argument is a scalar `Tensor` of type `string`. It is used to
-// build the `tag` of the summary values:
-//
-// * If `max_images` is 1, the summary value tag is '*tag*/image'.
-// * If `max_images` is greater than 1, the summary value tags are
-// generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.
+// Update '*var' according to the adagrad scheme.
//
-// The `bad_color` argument is the color to use in the generated images for
-// non-finite input values. It is a `unit8` 1-D tensor of length `channels`.
-// Each element must be in the range `[0, 255]` (It represents the value of a
-// pixel in the output image). Non-finite values in the input tensor are
-// replaced by this tensor in the output image. The default value is the color
-// red.
+// accum += grad * grad
+// var -= lr * grad * (1 / sqrt(accum))
//
// Arguments:
-// tag: Scalar. Used to build the `tag` attribute of the summary values.
-// tensor: 4-D of shape `[batch_size, height, width, channels]` where
-// `channels` is 1, 3, or 4.
+// var_: Should be from a Variable().
+// accum: Should be from a Variable().
+// lr: Scaling factor. Must be a scalar.
+// grad: The gradient.
//
-// Returns Scalar. Serialized `Summary` protocol buffer.
-func ImageSummary(scope *Scope, tag tf.Output, tensor tf.Output, optional ...ImageSummaryAttr) (summary tf.Output) {
+// Returns the created operation.
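+//
+// A minimal Go sketch (assuming a `Scope` s and handles varHandle, accum and
+// grad built elsewhere; the learning rate is illustrative):
+//
+// ```go
+// op := ResourceApplyAdagrad(s, varHandle, accum, Const(s, float32(0.01)), grad,
+// 	ResourceApplyAdagradUseLocking(true))
+// ```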
+func ResourceApplyAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, optional ...ResourceApplyAdagradAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
@@ -8379,353 +8542,448 @@ func ImageSummary(scope *Scope, tag tf.Output, tensor tf.Output, optional ...Ima
a(attrs)
}
opspec := tf.OpSpec{
- Type: "ImageSummary",
+ Type: "ResourceApplyAdagrad",
Input: []tf.Input{
- tag, tensor,
+ var_, accum, lr, grad,
},
Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
-// Computes the number of elements in the given queue.
+// Returns element-wise remainder of division. This emulates C semantics in that
//
-// Arguments:
-// handle: The handle to a queue.
+// the result here is consistent with a truncating divide. E.g. `truncate(x / y) *
+// y + truncate_mod(x, y) = x`.
//
-// Returns The number of elements in the given queue.
-func QueueSizeV2(scope *Scope, handle tf.Output) (size tf.Output) {
+// *NOTE*: `TruncateMod` supports broadcasting. More about broadcasting
+// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+func TruncateMod(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "QueueSizeV2",
+ Type: "TruncateMod",
Input: []tf.Input{
- handle,
+ x, y,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Outputs a `Summary` protocol buffer with a histogram.
+// Inverse 2D real-valued fast Fourier transform.
//
-// The generated
-// [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
-// has one summary value containing a histogram for `values`.
+// Computes the inverse 2-dimensional discrete Fourier transform of a real-valued
+// signal over the inner-most 2 dimensions of `input`.
//
-// This op reports an `InvalidArgument` error if any value is not finite.
+// The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`:
+// The inner-most dimension contains the `fft_length / 2 + 1` unique components of
+// the DFT of a real-valued signal. If `fft_length` is not provided, it is computed
+// from the size of the inner-most 2 dimensions of `input`. If the FFT length used
+// to compute `input` is odd, it should be provided since it cannot be inferred
+// properly.
+//
+// Along each axis `IRFFT2D` is computed on, if `fft_length` (or
+// `fft_length / 2 + 1` for the inner-most dimension) is smaller than the
+// corresponding dimension of `input`, the dimension is cropped. If it is larger,
+// the dimension is padded with zeros.
//
// Arguments:
-// tag: Scalar. Tag to use for the `Summary.Value`.
-// values: Any shape. Values to use to build the histogram.
+// input: A complex64 tensor.
+// fft_length: An int32 tensor of shape [2]. The FFT length for each dimension.
//
-// Returns Scalar. Serialized `Summary` protocol buffer.
-func HistogramSummary(scope *Scope, tag tf.Output, values tf.Output) (summary tf.Output) {
+// Returns A float32 tensor of the same rank as `input`. The inner-most 2
+// dimensions of `input` are replaced with the `fft_length` samples of their
+// inverse 2D Fourier transform.
+//
+// @compatibility(numpy)
+// Equivalent to np.fft.irfft2
+// @end_compatibility
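+//
+// A minimal Go sketch (assuming a `Scope` s and a complex64 spectrum produced
+// by RFFT2D; the FFT length is illustrative):
+//
+// ```go
+// signal := IRFFT2D(s, spectrum, Const(s, []int32{32, 32}))
+// ```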
+func IRFFT2D(scope *Scope, input tf.Output, fft_length tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "HistogramSummary",
+ Type: "IRFFT2D",
Input: []tf.Input{
- tag, values,
+ input, fft_length,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// RandomShuffleQueueV2Attr is an optional argument to RandomShuffleQueueV2.
-type RandomShuffleQueueV2Attr func(optionalAttr)
-
-// RandomShuffleQueueV2Shapes sets the optional shapes attribute to value.
+// Compute the pairwise cross product.
//
-// value: The shape of each component in a value. The length of this attr must
-// be either 0 or the same as the length of component_types. If the length of
-// this attr is 0, the shapes of queue elements are not constrained, and
-// only one element may be dequeued at a time.
-// If not specified, defaults to <>
+// `a` and `b` must be the same shape; they can either be simple 3-element vectors,
+// or any shape where the innermost dimension is 3. In the latter case, each pair
+// of corresponding 3-element vectors is cross-multiplied independently.
//
-// REQUIRES: len(value) >= 0
-func RandomShuffleQueueV2Shapes(value []tf.Shape) RandomShuffleQueueV2Attr {
- return func(m optionalAttr) {
- m["shapes"] = value
- }
-}
-
-// RandomShuffleQueueV2Capacity sets the optional capacity attribute to value.
+// Arguments:
+// a: A tensor containing 3-element vectors.
+// b: Another tensor, of same type and shape as `a`.
//
-// value: The upper bound on the number of elements in this queue.
-// Negative numbers mean no limit.
-// If not specified, defaults to -1
-func RandomShuffleQueueV2Capacity(value int64) RandomShuffleQueueV2Attr {
- return func(m optionalAttr) {
- m["capacity"] = value
+// Returns Pairwise cross product of the vectors in `a` and `b`.
+func Cross(scope *Scope, a tf.Output, b tf.Output) (product tf.Output) {
+ if scope.Err() != nil {
+ return
}
-}
-
-// RandomShuffleQueueV2MinAfterDequeue sets the optional min_after_dequeue attribute to value.
-//
-// value: Dequeue will block unless there would be this
-// many elements after the dequeue or the queue is closed. This
-// ensures a minimum level of mixing of elements.
-// If not specified, defaults to 0
-func RandomShuffleQueueV2MinAfterDequeue(value int64) RandomShuffleQueueV2Attr {
- return func(m optionalAttr) {
- m["min_after_dequeue"] = value
+ opspec := tf.OpSpec{
+ Type: "Cross",
+ Input: []tf.Input{
+ a, b,
+ },
}
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// RandomShuffleQueueV2Seed sets the optional seed attribute to value.
+// Transforms a vector of brain.Example protos (as strings) into typed tensors.
//
-// value: If either seed or seed2 is set to be non-zero, the random number
-// generator is seeded by the given seed. Otherwise, a random seed is used.
-// If not specified, defaults to 0
-func RandomShuffleQueueV2Seed(value int64) RandomShuffleQueueV2Attr {
- return func(m optionalAttr) {
- m["seed"] = value
+// Arguments:
+// serialized: A vector containing a batch of binary serialized Example protos.
+// names: A vector containing the names of the serialized protos.
+// May contain, for example, table key (descriptive) names for the
+// corresponding serialized protos. These are purely useful for debugging
+// purposes, and the presence of values here has no effect on the output.
+// May also be an empty vector if no names are available.
+// If non-empty, this vector must be the same length as "serialized".
+// sparse_keys: A list of Nsparse string Tensors (scalars).
+// The keys expected in the Examples' features associated with sparse values.
+// dense_keys: A list of Ndense string Tensors (scalars).
+// The keys expected in the Examples' features associated with dense values.
+// dense_defaults: A list of Ndense Tensors (some may be empty).
+// dense_defaults[j] provides default values
+// when the example's feature_map lacks dense_key[j]. If an empty Tensor is
+// provided for dense_defaults[j], then the Feature dense_keys[j] is required.
+// The input type is inferred from dense_defaults[j], even when it's empty.
+// If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined,
+// then the shape of dense_defaults[j] must match that of dense_shapes[j].
+// If dense_shapes[j] has an undefined major dimension (variable strides dense
+// feature), dense_defaults[j] must contain a single element:
+// the padding element.
+// sparse_types: A list of Nsparse types; the data types of data in each Feature
+// given in sparse_keys.
+// Currently the ParseExample supports DT_FLOAT (FloatList),
+// DT_INT64 (Int64List), and DT_STRING (BytesList).
+// dense_shapes: A list of Ndense shapes; the shapes of data in each Feature
+// given in dense_keys.
+// The number of elements in the Feature corresponding to dense_key[j]
+// must always equal dense_shapes[j].NumEntries().
+// If dense_shapes[j] == (D0, D1, ..., DN) then the shape of output
+// Tensor dense_values[j] will be (|serialized|, D0, D1, ..., DN):
+// The dense outputs are just the inputs row-stacked by batch.
+// This works for dense_shapes[j] = (-1, D1, ..., DN). In this case
+// the shape of the output Tensor dense_values[j] will be
+// (|serialized|, M, D1, .., DN), where M is the maximum number of blocks
+// of elements of length D1 * ... * DN, across all minibatch entries
+// in the input. Any minibatch entry with less than M blocks of elements of
+// length D1 * ... * DN will be padded with the corresponding default_value
+// scalar element along the second dimension.
+func ParseExample(scope *Scope, serialized tf.Output, names tf.Output, sparse_keys []tf.Output, dense_keys []tf.Output, dense_defaults []tf.Output, sparse_types []tf.DataType, dense_shapes []tf.Shape) (sparse_indices []tf.Output, sparse_values []tf.Output, sparse_shapes []tf.Output, dense_values []tf.Output) {
+ if scope.Err() != nil {
+ return
}
-}
-
-// RandomShuffleQueueV2Seed2 sets the optional seed2 attribute to value.
-//
-// value: A second seed to avoid seed collision.
-// If not specified, defaults to 0
-func RandomShuffleQueueV2Seed2(value int64) RandomShuffleQueueV2Attr {
- return func(m optionalAttr) {
- m["seed2"] = value
+ attrs := map[string]interface{}{"sparse_types": sparse_types, "dense_shapes": dense_shapes}
+ opspec := tf.OpSpec{
+ Type: "ParseExample",
+ Input: []tf.Input{
+ serialized, names, tf.OutputList(sparse_keys), tf.OutputList(dense_keys), tf.OutputList(dense_defaults),
+ },
+ Attrs: attrs,
}
-}
-
-// RandomShuffleQueueV2Container sets the optional container attribute to value.
-//
-// value: If non-empty, this queue is placed in the given container.
-// Otherwise, a default container is used.
-// If not specified, defaults to ""
-func RandomShuffleQueueV2Container(value string) RandomShuffleQueueV2Attr {
- return func(m optionalAttr) {
- m["container"] = value
+ op := scope.AddOperation(opspec)
+ if scope.Err() != nil {
+ return
}
+ var idx int
+ var err error
+ if sparse_indices, idx, err = makeOutputList(op, idx, "sparse_indices"); err != nil {
+ scope.UpdateErr("ParseExample", err)
+ return
+ }
+ if sparse_values, idx, err = makeOutputList(op, idx, "sparse_values"); err != nil {
+ scope.UpdateErr("ParseExample", err)
+ return
+ }
+ if sparse_shapes, idx, err = makeOutputList(op, idx, "sparse_shapes"); err != nil {
+ scope.UpdateErr("ParseExample", err)
+ return
+ }
+ if dense_values, idx, err = makeOutputList(op, idx, "dense_values"); err != nil {
+ scope.UpdateErr("ParseExample", err)
+ return
+ }
+ return sparse_indices, sparse_values, sparse_shapes, dense_values
}
-// RandomShuffleQueueV2SharedName sets the optional shared_name attribute to value.
-//
-// value: If non-empty, this queue will be shared under the given name
-// across multiple sessions.
-// If not specified, defaults to ""
-func RandomShuffleQueueV2SharedName(value string) RandomShuffleQueueV2Attr {
+// VariableShapeAttr is an optional argument to VariableShape.
+type VariableShapeAttr func(optionalAttr)
+
+// VariableShapeOutType sets the optional out_type attribute to value.
+// If not specified, defaults to DT_INT32
+func VariableShapeOutType(value tf.DataType) VariableShapeAttr {
return func(m optionalAttr) {
- m["shared_name"] = value
+ m["out_type"] = value
}
}
-// A queue that randomizes the order of elements.
+// Returns the shape of the variable pointed to by `resource`.
//
-// Arguments:
-// component_types: The type of each component in a value.
+// This operation returns a 1-D integer tensor representing the shape of `input`.
//
-// Returns The handle to the queue.
-func RandomShuffleQueueV2(scope *Scope, component_types []tf.DataType, optional ...RandomShuffleQueueV2Attr) (handle tf.Output) {
+// For example:
+//
+// ```
+// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
+// shape(t) ==> [2, 2, 3]
+// ```
+func VariableShape(scope *Scope, input tf.Output, optional ...VariableShapeAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"component_types": component_types}
+ attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "RandomShuffleQueueV2",
-
+ Type: "VariableShape",
+ Input: []tf.Input{
+ input,
+ },
Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Outputs a `Summary` protocol buffer with scalar values.
+// Fills empty rows in the input 2-D `SparseTensor` with a default value.
//
-// The input `tags` and `values` must have the same shape. The generated summary
-// has a summary value for each tag-value pair in `tags` and `values`.
+// The input `SparseTensor` is represented via the tuple of inputs
+// (`indices`, `values`, `dense_shape`). The output `SparseTensor` has the
+// same `dense_shape` but with indices `output_indices` and values
+// `output_values`.
+//
+// This op inserts a single entry for every row that doesn't have any values.
+// The index is created as `[row, 0, ..., 0]` and the inserted value
+// is `default_value`.
+//
+// For example, suppose `sp_input` has shape `[5, 6]` and non-empty values:
+//
+// [0, 1]: a
+// [0, 3]: b
+// [2, 0]: c
+// [3, 1]: d
+//
+// Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values:
+//
+// [0, 1]: a
+// [0, 3]: b
+// [1, 0]: default_value
+// [2, 0]: c
+// [3, 1]: d
+// [4, 0]: default_value
+//
+// The output `SparseTensor` will be in row-major order and will have the
+// same shape as the input.
+//
+// This op also returns an indicator vector shaped `[dense_shape[0]]` such that
+//
+// empty_row_indicator[i] = True iff row i was an empty row.
+//
+// And a reverse index map vector shaped `[indices.shape[0]]` that is used during
+// backpropagation,
+//
+// reverse_index_map[j] = out_j s.t. indices[j, :] == output_indices[out_j, :]
//
// Arguments:
-// tags: Tags for the summary.
-// values: Same shape as `tags. Values for the summary.
+// indices: 2-D. the indices of the sparse tensor.
+// values: 1-D. the values of the sparse tensor.
+// dense_shape: 1-D. the shape of the sparse tensor.
+// default_value: 0-D. default value to insert into location `[row, 0, ..., 0]`
+// for rows missing from the input sparse tensor.
//
-// Returns Scalar. Serialized `Summary` protocol buffer.
-func ScalarSummary(scope *Scope, tags tf.Output, values tf.Output) (summary tf.Output) {
+// Returns 2-D. the indices of the filled sparse tensor; 1-D. the values of the
+// filled sparse tensor; 1-D. whether the dense row was missing in the input
+// sparse tensor; and 1-D. a map from the input indices to the output indices.
+func SparseFillEmptyRows(scope *Scope, indices tf.Output, values tf.Output, dense_shape tf.Output, default_value tf.Output) (output_indices tf.Output, output_values tf.Output, empty_row_indicator tf.Output, reverse_index_map tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "ScalarSummary",
+ Type: "SparseFillEmptyRows",
Input: []tf.Input{
- tags, values,
+ indices, values, dense_shape, default_value,
},
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
}
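
To make the worked example in the docstring concrete, here is an editor's sketch (not part of the generated file) that runs `SparseFillEmptyRows` on that exact sparse tensor; the default value `"x"` is an arbitrary stand-in, and `must` is our own helper.

```go
package main

import (
	"fmt"
	"log"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

// must is a local convenience helper, not part of the TensorFlow API.
func must(err error) {
	if err != nil {
		log.Fatal(err)
	}
}

func main() {
	s := op.NewScope()
	// The sparse tensor from the docstring above: shape [5, 6], rows 1 and 4 empty.
	indices := op.Const(s, [][]int64{{0, 1}, {0, 3}, {2, 0}, {3, 1}})
	values := op.Const(s, []string{"a", "b", "c", "d"})
	denseShape := op.Const(s, []int64{5, 6})
	defaultValue := op.Const(s, "x") // arbitrary default_value
	outIndices, outValues, emptyRows, _ := op.SparseFillEmptyRows(s, indices, values, denseShape, defaultValue)

	graph, err := s.Finalize()
	must(err)
	sess, err := tf.NewSession(graph, nil)
	must(err)
	defer sess.Close()

	out, err := sess.Run(nil, []tf.Output{outIndices, outValues, emptyRows}, nil)
	must(err)
	fmt.Println(out[0].Value()) // expected: [[0 1] [0 3] [1 0] [2 0] [3 1] [4 0]]
	fmt.Println(out[1].Value()) // expected: [a b x c d x]
	fmt.Println(out[2].Value()) // expected: [false true false false true]
}
```
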
-// TensorSummaryAttr is an optional argument to TensorSummary.
-type TensorSummaryAttr func(optionalAttr)
-
-// TensorSummaryDescription sets the optional description attribute to value.
+// Reverses specific dimensions of a tensor.
//
-// value: A json-encoded SummaryDescription proto.
-// If not specified, defaults to ""
-func TensorSummaryDescription(value string) TensorSummaryAttr {
- return func(m optionalAttr) {
- m["description"] = value
- }
-}
-
-// TensorSummaryLabels sets the optional labels attribute to value.
+// Given a `tensor`, and a `bool` tensor `dims` representing the dimensions
+// of `tensor`, this operation reverses each dimension i of `tensor` where
+// `dims[i]` is `True`.
//
-// value: An unused list of strings.
-// If not specified, defaults to <>
-func TensorSummaryLabels(value []string) TensorSummaryAttr {
- return func(m optionalAttr) {
- m["labels"] = value
- }
-}
-
-// TensorSummaryDisplayName sets the optional display_name attribute to value.
+// `tensor` can have up to 8 dimensions. The number of dimensions
+// of `tensor` must equal the number of elements in `dims`. In other words:
//
-// value: An unused string.
-// If not specified, defaults to ""
-func TensorSummaryDisplayName(value string) TensorSummaryAttr {
- return func(m optionalAttr) {
- m["display_name"] = value
- }
-}
-
-// Outputs a `Summary` protocol buffer with a tensor.
+// `rank(tensor) = size(dims)`
//
-// This op is being phased out in favor of TensorSummaryV2, which lets callers pass
-// a tag as well as a serialized SummaryMetadata proto string that contains
-// plugin-specific data. We will keep this op to maintain backwards compatibility.
+// For example:
+//
+// ```
+// # tensor 't' is [[[[ 0, 1, 2, 3],
+// # [ 4, 5, 6, 7],
+// # [ 8, 9, 10, 11]],
+// # [[12, 13, 14, 15],
+// # [16, 17, 18, 19],
+// # [20, 21, 22, 23]]]]
+// # tensor 't' shape is [1, 2, 3, 4]
+//
+// # 'dims' is [False, False, False, True]
+// reverse(t, dims) ==> [[[[ 3, 2, 1, 0],
+// [ 7, 6, 5, 4],
+// [ 11, 10, 9, 8]],
+// [[15, 14, 13, 12],
+// [19, 18, 17, 16],
+// [23, 22, 21, 20]]]]
+//
+// # 'dims' is [False, True, False, False]
+// reverse(t, dims) ==> [[[[12, 13, 14, 15],
+// [16, 17, 18, 19],
+// [20, 21, 22, 23]],
+// [[ 0, 1, 2, 3],
+// [ 4, 5, 6, 7],
+// [ 8, 9, 10, 11]]]]
+//
+// # 'dims' is [False, False, True, False]
+// reverse(t, dims) ==> [[[[8, 9, 10, 11],
+// [4, 5, 6, 7],
+// [0, 1, 2, 3]],
+// [[20, 21, 22, 23],
+// [16, 17, 18, 19],
+// [12, 13, 14, 15]]]]
+// ```
//
// Arguments:
-// tensor: A tensor to serialize.
-func TensorSummary(scope *Scope, tensor tf.Output, optional ...TensorSummaryAttr) (summary tf.Output) {
+// tensor: Up to 8-D.
+// dims: 1-D. The dimensions to reverse.
+//
+// Returns The same shape as `tensor`.
+func Reverse(scope *Scope, tensor tf.Output, dims tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "TensorSummary",
+ Type: "Reverse",
Input: []tf.Input{
- tensor,
+ tensor, dims,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
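
A small editor's sketch (not part of the generated file), using a 2-D tensor rather than the 4-D one above to keep the output readable; inputs are arbitrary and `must` is our own helper.

```go
package main

import (
	"fmt"
	"log"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

// must is a local convenience helper, not part of the TensorFlow API.
func must(err error) {
	if err != nil {
		log.Fatal(err)
	}
}

func main() {
	s := op.NewScope()
	t := op.Const(s, [][]int32{{1, 2, 3}, {4, 5, 6}})
	dims := op.Const(s, []bool{false, true}) // reverse dimension 1 only
	rev := op.Reverse(s, t, dims)

	graph, err := s.Finalize()
	must(err)
	sess, err := tf.NewSession(graph, nil)
	must(err)
	defer sess.Close()

	out, err := sess.Run(nil, []tf.Output{rev}, nil)
	must(err)
	fmt.Println(out[0].Value()) // expected: [[3 2 1] [6 5 4]]
}
```
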
-// Creates a dataset that asynchronously prefetches elements from `input_dataset`.
+// Computes log softmax activations.
//
-// Arguments:
+// For each batch `i` and class `j` we have
//
-// buffer_size: The maximum number of elements to buffer in an iterator over
-// this dataset.
+// logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))
//
+// Arguments:
+// logits: 2-D with shape `[batch_size, num_classes]`.
//
-func PrefetchDataset(scope *Scope, input_dataset tf.Output, buffer_size tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
+// Returns Same shape as `logits`.
+func LogSoftmax(scope *Scope, logits tf.Output) (logsoftmax tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
opspec := tf.OpSpec{
- Type: "PrefetchDataset",
+ Type: "LogSoftmax",
Input: []tf.Input{
- input_dataset, buffer_size,
+ logits,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
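
The formula above is just `logits` minus the log-sum-exp of each row. An editor's sketch (not part of the generated file), with arbitrary logits and our own `must` helper:

```go
package main

import (
	"fmt"
	"log"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

// must is a local convenience helper, not part of the TensorFlow API.
func must(err error) {
	if err != nil {
		log.Fatal(err)
	}
}

func main() {
	s := op.NewScope()
	logits := op.Const(s, [][]float32{{1, 2, 3}})
	lsm := op.LogSoftmax(s, logits)

	graph, err := s.Finalize()
	must(err)
	sess, err := tf.NewSession(graph, nil)
	must(err)
	defer sess.Close()

	out, err := sess.Run(nil, []tf.Output{lsm}, nil)
	must(err)
	// log-sum-exp of (1, 2, 3) is about 3.4076, so each entry is logit - 3.4076:
	fmt.Println(out[0].Value()) // expected: approx [[-2.4076 -1.4076 -0.4076]]
}
```
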
-// Outputs a `Summary` protocol buffer with a tensor and per-plugin data.
+// Computes the inverse permutation of a tensor.
+//
+// This operation computes the inverse of an index permutation. It takes a 1-D
+// integer tensor `x`, which represents the indices of a zero-based array, and
+// swaps each value with its index position. In other words, for an output tensor
+// `y` and an input tensor `x`, this operation computes the following:
+//
+// `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`
+//
+// The values must include 0. There can be no duplicate values or negative values.
+//
+// For example:
+//
+// ```
+// # tensor `x` is [3, 4, 0, 2, 1]
+// invert_permutation(x) ==> [2, 4, 3, 0, 1]
+// ```
//
// Arguments:
-// tag: A string attached to this summary. Used for organization in TensorBoard.
-// tensor: A tensor to serialize.
-// serialized_summary_metadata: A serialized SummaryMetadata proto. Contains plugin
-// data.
-func TensorSummaryV2(scope *Scope, tag tf.Output, tensor tf.Output, serialized_summary_metadata tf.Output) (summary tf.Output) {
+// x: 1-D.
+//
+// Returns 1-D.
+func InvertPermutation(scope *Scope, x tf.Output) (y tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "TensorSummaryV2",
+ Type: "InvertPermutation",
Input: []tf.Input{
- tag, tensor, serialized_summary_metadata,
+ x,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
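
The docstring example above translates directly to the Go bindings; this editor's sketch (not part of the generated file, `must` is our own helper) reproduces it:

```go
package main

import (
	"fmt"
	"log"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

// must is a local convenience helper, not part of the TensorFlow API.
func must(err error) {
	if err != nil {
		log.Fatal(err)
	}
}

func main() {
	s := op.NewScope()
	x := op.Const(s, []int32{3, 4, 0, 2, 1})
	y := op.InvertPermutation(s, x)

	graph, err := s.Finalize()
	must(err)
	sess, err := tf.NewSession(graph, nil)
	must(err)
	defer sess.Close()

	out, err := sess.Run(nil, []tf.Output{y}, nil)
	must(err)
	fmt.Println(out[0].Value()) // expected: [2 4 3 0 1], matching the docstring
}
```
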
-// PrintAttr is an optional argument to Print.
-type PrintAttr func(optionalAttr)
-
-// PrintMessage sets the optional message attribute to value.
+// Gradient op for `MirrorPad` op. This op folds a mirror-padded tensor.
//
-// value: A string, prefix of the error message.
-// If not specified, defaults to ""
-func PrintMessage(value string) PrintAttr {
- return func(m optionalAttr) {
- m["message"] = value
- }
-}
-
-// PrintFirstN sets the optional first_n attribute to value.
+// This operation folds the padded areas of `input` by `MirrorPad` according to the
+// `paddings` you specify. `paddings` must be the same as `paddings` argument
+// given to the corresponding `MirrorPad` op.
//
-// value: Only log `first_n` number of times. -1 disables logging.
-// If not specified, defaults to -1
-func PrintFirstN(value int64) PrintAttr {
- return func(m optionalAttr) {
- m["first_n"] = value
- }
-}
-
-// PrintSummarize sets the optional summarize attribute to value.
+// The folded size of each dimension D of the output is:
//
-// value: Only print this many entries of each tensor.
-// If not specified, defaults to 3
-func PrintSummarize(value int64) PrintAttr {
- return func(m optionalAttr) {
- m["summarize"] = value
- }
-}
-
-// Prints a list of tensors.
+// `input.dim_size(D) - paddings(D, 0) - paddings(D, 1)`
//
-// Passes `input` through to `output` and prints `data` when evaluating.
+// For example:
+//
+// ```
+// # 't' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]].
+// # 'paddings' is [[0, 1], [0, 1]].
+// # 'mode' is SYMMETRIC.
+// # rank of 't' is 2.
+// pad(t, paddings) ==> [[ 1, 5],
+// [11, 28]]
+// ```
//
// Arguments:
-// input: The tensor passed to `output`
-// data: A list of tensors to print out when op is evaluated.
+// input: The input tensor to be folded.
+// paddings: A two-column matrix specifying the padding sizes. The number of
+// rows must be the same as the rank of `input`.
+// mode: The mode used in the `MirrorPad` op.
//
-// Returns = The unmodified `input` tensor
-func Print(scope *Scope, input tf.Output, data []tf.Output, optional ...PrintAttr) (output tf.Output) {
+// Returns The folded tensor.
+func MirrorPadGrad(scope *Scope, input tf.Output, paddings tf.Output, mode string) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
+ attrs := map[string]interface{}{"mode": mode}
opspec := tf.OpSpec{
- Type: "Print",
+ Type: "MirrorPadGrad",
Input: []tf.Input{
- input, tf.OutputList(data),
+ input, paddings,
},
Attrs: attrs,
}
@@ -8733,60 +8991,64 @@ func Print(scope *Scope, input tf.Output, data []tf.Output, optional ...PrintAtt
return op.Output(0)
}
-// Table initializer that takes two tensors for keys and values respectively.
+// Computes softmax cross entropy cost and gradients to backpropagate.
+//
+// Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept
+// a matrix of label probabilities, but rather a single label per row
+// of features. This label is considered to have probability 1.0 for the
+// given row.
+//
+// Inputs are the logits, not probabilities.
//
// Arguments:
-// table_handle: Handle to a table which will be initialized.
-// keys: Keys of type Tkey.
-// values: Values of type Tval.
+// features: batch_size x num_classes matrix
+// labels: batch_size vector with values in [0, num_classes).
+// This is the label for the given minibatch entry.
//
-// Returns the created operation.
-func InitializeTableV2(scope *Scope, table_handle tf.Output, keys tf.Output, values tf.Output) (o *tf.Operation) {
+// Returns Per-example loss (a batch_size vector) and backpropagated gradients (a batch_size x num_classes matrix).
+func SparseSoftmaxCrossEntropyWithLogits(scope *Scope, features tf.Output, labels tf.Output) (loss tf.Output, backprop tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "InitializeTableV2",
+ Type: "SparseSoftmaxCrossEntropyWithLogits",
Input: []tf.Input{
- table_handle, keys, values,
+ features, labels,
},
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0), op.Output(1)
}
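
An editor's sketch (not part of the generated file) of calling this op directly: two rows of arbitrary logits, one integer label per row; `must` is our own helper.

```go
package main

import (
	"fmt"
	"log"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

// must is a local convenience helper, not part of the TensorFlow API.
func must(err error) {
	if err != nil {
		log.Fatal(err)
	}
}

func main() {
	s := op.NewScope()
	features := op.Const(s, [][]float32{{2, 0, 0}, {0, 0, 2}}) // logits, not probabilities
	labels := op.Const(s, []int64{0, 2})                       // one class index per row
	loss, backprop := op.SparseSoftmaxCrossEntropyWithLogits(s, features, labels)

	graph, err := s.Finalize()
	must(err)
	sess, err := tf.NewSession(graph, nil)
	must(err)
	defer sess.Close()

	out, err := sess.Run(nil, []tf.Output{loss, backprop}, nil)
	must(err)
	// Both rows put logit 2 on the true class: loss = -log(e^2 / (e^2 + 2)).
	fmt.Println(out[0].Value()) // expected: approx [0.2395 0.2395]
	fmt.Println(out[1].Value()) // softmax(features) minus the one-hot labels
}
```
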
-// DataFormatDimMapAttr is an optional argument to DataFormatDimMap.
-type DataFormatDimMapAttr func(optionalAttr)
-
-// DataFormatDimMapSrcFormat sets the optional src_format attribute to value.
-//
-// value: source data format.
-// If not specified, defaults to "NHWC"
-func DataFormatDimMapSrcFormat(value string) DataFormatDimMapAttr {
- return func(m optionalAttr) {
- m["src_format"] = value
- }
-}
+// ResourceSparseApplyAdagradDAAttr is an optional argument to ResourceSparseApplyAdagradDA.
+type ResourceSparseApplyAdagradDAAttr func(optionalAttr)
-// DataFormatDimMapDstFormat sets the optional dst_format attribute to value.
+// ResourceSparseApplyAdagradDAUseLocking sets the optional use_locking attribute to value.
//
-// value: destination data format.
-// If not specified, defaults to "NCHW"
-func DataFormatDimMapDstFormat(value string) DataFormatDimMapAttr {
+// value: If True, updating of the var and accum tensors will be protected by
+// a lock; otherwise the behavior is undefined, but may exhibit less contention.
+// If not specified, defaults to false
+func ResourceSparseApplyAdagradDAUseLocking(value bool) ResourceSparseApplyAdagradDAAttr {
return func(m optionalAttr) {
- m["dst_format"] = value
+ m["use_locking"] = value
}
}
-// Returns the dimension index in the destination data format given the one in
-//
-// the source data format.
+// Update entries in '*var' and '*accum' according to the proximal adagrad scheme.
//
// Arguments:
-// x: A Tensor with each element as a dimension index in source data format.
-// Must be in the range [-4, 4).
+// var_: Should be from a Variable().
+// gradient_accumulator: Should be from a Variable().
+// gradient_squared_accumulator: Should be from a Variable().
+// grad: The gradient.
+// indices: A vector of indices into the first dimension of var and accum.
+// lr: Learning rate. Must be a scalar.
+// l1: L1 regularization. Must be a scalar.
+// l2: L2 regularization. Must be a scalar.
+// global_step: Training step number. Must be a scalar.
//
-// Returns A Tensor with each element as a dimension index in destination data format.
-func DataFormatDimMap(scope *Scope, x tf.Output, optional ...DataFormatDimMapAttr) (y tf.Output) {
+// Returns the created operation.
+func ResourceSparseApplyAdagradDA(scope *Scope, var_ tf.Output, gradient_accumulator tf.Output, gradient_squared_accumulator tf.Output, grad tf.Output, indices tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, global_step tf.Output, optional ...ResourceSparseApplyAdagradDAAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
@@ -8795,173 +9057,192 @@ func DataFormatDimMap(scope *Scope, x tf.Output, optional ...DataFormatDimMapAtt
a(attrs)
}
opspec := tf.OpSpec{
- Type: "DataFormatDimMap",
+ Type: "ResourceSparseApplyAdagradDA",
Input: []tf.Input{
- x,
+ var_, gradient_accumulator, gradient_squared_accumulator, grad, indices, lr, l1, l2, global_step,
},
Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
-// ResourceApplyPowerSignAttr is an optional argument to ResourceApplyPowerSign.
-type ResourceApplyPowerSignAttr func(optionalAttr)
-
-// ResourceApplyPowerSignUseLocking sets the optional use_locking attribute to value.
-//
-// value: If `True`, updating of the var and m tensors is
-// protected by a lock; otherwise the behavior is undefined, but may exhibit less
-// contention.
-// If not specified, defaults to false
-func ResourceApplyPowerSignUseLocking(value bool) ResourceApplyPowerSignAttr {
- return func(m optionalAttr) {
- m["use_locking"] = value
+// Returns the truth value of NOT x element-wise.
+func LogicalNot(scope *Scope, x tf.Output) (y tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "LogicalNot",
+ Input: []tf.Input{
+ x,
+ },
}
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// Update '*var' according to the AddSign update.
+// 3D real-valued fast Fourier transform.
//
-// m_t <- beta1 * m_{t-1} + (1 - beta1) * g
-// update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g
-// variable <- variable - lr_t * update
+// Computes the 3-dimensional discrete Fourier transform of a real-valued signal
+// over the inner-most 3 dimensions of `input`.
+//
+// Since the DFT of a real signal is Hermitian-symmetric, `RFFT3D` only returns the
+// `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension
+// of `output`: the zero-frequency term, followed by the `fft_length / 2`
+// positive-frequency terms.
+//
+// Along each axis `RFFT3D` is computed on, if `fft_length` is smaller than the
+// corresponding dimension of `input`, the dimension is cropped. If it is larger,
+// the dimension is padded with zeros.
//
// Arguments:
-// var_: Should be from a Variable().
-// m: Should be from a Variable().
-// lr: Scaling factor. Must be a scalar.
-// logbase: Must be a scalar.
-// sign_decay: Must be a scalar.
-// beta: Must be a scalar.
-// grad: The gradient.
+// input: A float32 tensor.
+// fft_length: An int32 tensor of shape [3]. The FFT length for each dimension.
//
-// Returns the created operation.
-func ResourceApplyPowerSign(scope *Scope, var_ tf.Output, m tf.Output, lr tf.Output, logbase tf.Output, sign_decay tf.Output, beta tf.Output, grad tf.Output, optional ...ResourceApplyPowerSignAttr) (o *tf.Operation) {
+// Returns A complex64 tensor of the same rank as `input`. The inner-most 3
+// dimensions of `input` are replaced with their 3D Fourier transform. The
+// inner-most dimension contains `fft_length / 2 + 1` unique frequency
+// components.
+//
+// @compatibility(numpy)
+// Equivalent to np.fft.rfftn with 3 dimensions.
+// @end_compatibility
+func RFFT3D(scope *Scope, input tf.Output, fft_length tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "ResourceApplyPowerSign",
+ Type: "RFFT3D",
Input: []tf.Input{
- var_, m, lr, logbase, sign_decay, beta, grad,
+ input, fft_length,
},
- Attrs: attrs,
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// CropAndResizeAttr is an optional argument to CropAndResize.
-type CropAndResizeAttr func(optionalAttr)
+// TensorArrayV3Attr is an optional argument to TensorArrayV3.
+type TensorArrayV3Attr func(optionalAttr)
-// CropAndResizeMethod sets the optional method attribute to value.
+// TensorArrayV3ElementShape sets the optional element_shape attribute to value.
//
-// value: A string specifying the interpolation method. Only 'bilinear' is
-// supported for now.
-// If not specified, defaults to "bilinear"
-func CropAndResizeMethod(value string) CropAndResizeAttr {
+// value: The expected shape of an element, if known. Used to
+// validate the shapes of TensorArray elements. If this shape is not
+// fully specified, gathering zero-size TensorArrays is an error.
+// If not specified, defaults to <unknown_rank:true >
+func TensorArrayV3ElementShape(value tf.Shape) TensorArrayV3Attr {
return func(m optionalAttr) {
- m["method"] = value
+ m["element_shape"] = value
}
}
-// CropAndResizeExtrapolationValue sets the optional extrapolation_value attribute to value.
+// TensorArrayV3DynamicSize sets the optional dynamic_size attribute to value.
//
-// value: Value used for extrapolation, when applicable.
-// If not specified, defaults to 0
-func CropAndResizeExtrapolationValue(value float32) CropAndResizeAttr {
+// value: A boolean that determines whether writes to the TensorArray
+// are allowed to grow the size. By default, this is not allowed.
+// If not specified, defaults to false
+func TensorArrayV3DynamicSize(value bool) TensorArrayV3Attr {
return func(m optionalAttr) {
- m["extrapolation_value"] = value
+ m["dynamic_size"] = value
}
}
-// Extracts crops from the input image tensor and bilinearly resizes them (possibly
+// TensorArrayV3ClearAfterRead sets the optional clear_after_read attribute to value.
//
-// with aspect ratio change) to a common output size specified by `crop_size`. This
-// is more general than the `crop_to_bounding_box` op which extracts a fixed size
-// slice from the input image and does not allow resizing or aspect ratio change.
+// value: If true (default), Tensors in the TensorArray are cleared
+// after being read. This disables multiple read semantics but allows early
+// release of memory.
+// If not specified, defaults to true
+func TensorArrayV3ClearAfterRead(value bool) TensorArrayV3Attr {
+ return func(m optionalAttr) {
+ m["clear_after_read"] = value
+ }
+}
+
+// TensorArrayV3IdenticalElementShapes sets the optional identical_element_shapes attribute to value.
//
-// Returns a tensor with `crops` from the input `image` at positions defined at the
-// bounding box locations in `boxes`. The cropped boxes are all resized (with
-// bilinear interpolation) to a fixed `size = [crop_height, crop_width]`. The
-// result is a 4-D tensor `[num_boxes, crop_height, crop_width, depth]`. The
-// resizing is corner aligned. In particular, if `boxes = [[0, 0, 1, 1]]`, the
-// method will give identical results to using `tf.image.resize_bilinear()`
-// with `align_corners=True`.
+// value: If true (default is false), then all
+// elements in the TensorArray will be expected to have identical shapes.
+// This allows certain behaviors, like dynamically checking for
+// consistent shapes on write, and being able to fill in properly
+// shaped zero tensors on stack -- even if the element_shape attribute
+// is not fully defined.
+// If not specified, defaults to false
+func TensorArrayV3IdenticalElementShapes(value bool) TensorArrayV3Attr {
+ return func(m optionalAttr) {
+ m["identical_element_shapes"] = value
+ }
+}
+
+// TensorArrayV3TensorArrayName sets the optional tensor_array_name attribute to value.
+//
+// value: Overrides the name used for the temporary tensor_array
+// resource. Default value is the name of the 'TensorArray' op (which
+// is guaranteed unique).
+// If not specified, defaults to ""
+func TensorArrayV3TensorArrayName(value string) TensorArrayV3Attr {
+ return func(m optionalAttr) {
+ m["tensor_array_name"] = value
+ }
+}
+
+// An array of Tensors of given size.
+//
+// Write data via Write and read via Read or Pack.
//
// Arguments:
-// image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
-// Both `image_height` and `image_width` need to be positive.
-// boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
-// specifies the coordinates of a box in the `box_ind[i]` image and is specified
-// in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
-// `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the
-// `[0, 1]` interval of normalized image height is mapped to
-// `[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in
-// which case the sampled crop is an up-down flipped version of the original
-// image. The width dimension is treated similarly. Normalized coordinates
-// outside the `[0, 1]` range are allowed, in which case we use
-// `extrapolation_value` to extrapolate the input image values.
-// box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
-// The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
-// crop_size: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All
-// cropped image patches are resized to this size. The aspect ratio of the image
-// content is not preserved. Both `crop_height` and `crop_width` need to be
-// positive.
+// size: The size of the array.
+// dtype: The type of the elements in the tensor_array.
//
-// Returns A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
-func CropAndResize(scope *Scope, image tf.Output, boxes tf.Output, box_ind tf.Output, crop_size tf.Output, optional ...CropAndResizeAttr) (crops tf.Output) {
+// Returns The handle to the TensorArray and a scalar used to control gradient flow.
+func TensorArrayV3(scope *Scope, size tf.Output, dtype tf.DataType, optional ...TensorArrayV3Attr) (handle tf.Output, flow tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"dtype": dtype}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "CropAndResize",
+ Type: "TensorArrayV3",
Input: []tf.Input{
- image, boxes, box_ind, crop_size,
+ size,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1)
}
-// MaxPoolGradAttr is an optional argument to MaxPoolGrad.
-type MaxPoolGradAttr func(optionalAttr)
+// MaxPool3DAttr is an optional argument to MaxPool3D.
+type MaxPool3DAttr func(optionalAttr)
-// MaxPoolGradDataFormat sets the optional data_format attribute to value.
+// MaxPool3DDataFormat sets the optional data_format attribute to value.
//
-// value: Specify the data format of the input and output data. With the
-// default format "NHWC", the data is stored in the order of:
-// [batch, in_height, in_width, in_channels].
-// Alternatively, the format could be "NCHW", the data storage order of:
-// [batch, in_channels, in_height, in_width].
-// If not specified, defaults to "NHWC"
-func MaxPoolGradDataFormat(value string) MaxPoolGradAttr {
+// value: The data format of the input and output data. With the
+// default format "NDHWC", the data is stored in the order of:
+// [batch, in_depth, in_height, in_width, in_channels].
+// Alternatively, the format could be "NCDHW", the data storage order is:
+// [batch, in_channels, in_depth, in_height, in_width].
+// If not specified, defaults to "NDHWC"
+func MaxPool3DDataFormat(value string) MaxPool3DAttr {
return func(m optionalAttr) {
m["data_format"] = value
}
}
-// Computes gradients of the maxpooling function.
+// Performs 3D max pooling on the input.
//
// Arguments:
-// orig_input: The original input tensor.
-// orig_output: The original output tensor.
-// grad: 4-D. Gradients w.r.t. the output of `max_pool`.
-// ksize: The size of the window for each dimension of the input tensor.
-// strides: The stride of the sliding window for each dimension of the
-// input tensor.
+// input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.
+// ksize: 1-D tensor of length 5. The size of the window for each dimension of
+// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
+// strides: 1-D tensor of length 5. The stride of the sliding window for each
+// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
// padding: The type of padding algorithm to use.
//
-// Returns Gradients w.r.t. the input to `max_pool`.
-func MaxPoolGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolGradAttr) (output tf.Output) {
+// Returns The max pooled output tensor.
+func MaxPool3D(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPool3DAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
@@ -8970,9 +9251,9 @@ func MaxPoolGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad
a(attrs)
}
opspec := tf.OpSpec{
- Type: "MaxPoolGrad",
+ Type: "MaxPool3D",
Input: []tf.Input{
- orig_input, orig_output, grad,
+ input,
},
Attrs: attrs,
}
@@ -8980,124 +9261,152 @@ func MaxPoolGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad
return op.Output(0)
}
-// EncodeJpegAttr is an optional argument to EncodeJpeg.
-type EncodeJpegAttr func(optionalAttr)
-
-// EncodeJpegFormat sets the optional format attribute to value.
+// Computes the gradients of 3-D convolution with respect to the input.
//
-// value: Per pixel image format.
-// If not specified, defaults to ""
-func EncodeJpegFormat(value string) EncodeJpegAttr {
- return func(m optionalAttr) {
- m["format"] = value
- }
-}
-
-// EncodeJpegQuality sets the optional quality attribute to value.
+// DEPRECATED at GraphDef version 10: Use Conv3DBackpropInputV2
//
-// value: Quality of the compression from 0 to 100 (higher is better and slower).
-// If not specified, defaults to 95
-func EncodeJpegQuality(value int64) EncodeJpegAttr {
- return func(m optionalAttr) {
- m["quality"] = value
+// Arguments:
+// input: Shape `[batch, depth, rows, cols, in_channels]`.
+// filter: Shape `[depth, rows, cols, in_channels, out_channels]`.
+// `in_channels` must match between `input` and `filter`.
+// out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
+// out_channels]`.
+// strides: 1-D tensor of length 5. The stride of the sliding window for each
+// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
+// padding: The type of padding algorithm to use.
+func Conv3DBackpropInput(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string) (output tf.Output) {
+ if scope.Err() != nil {
+ return
}
-}
-
-// EncodeJpegProgressive sets the optional progressive attribute to value.
-//
-// value: If True, create a JPEG that loads progressively (coarse to fine).
-// If not specified, defaults to false
-func EncodeJpegProgressive(value bool) EncodeJpegAttr {
- return func(m optionalAttr) {
- m["progressive"] = value
+ attrs := map[string]interface{}{"strides": strides, "padding": padding}
+ opspec := tf.OpSpec{
+ Type: "Conv3DBackpropInput",
+ Input: []tf.Input{
+ input, filter, out_backprop,
+ },
+ Attrs: attrs,
}
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// EncodeJpegOptimizeSize sets the optional optimize_size attribute to value.
+// Inverse 2D fast Fourier transform.
//
-// value: If True, spend CPU/RAM to reduce size with no quality change.
-// If not specified, defaults to false
-func EncodeJpegOptimizeSize(value bool) EncodeJpegAttr {
- return func(m optionalAttr) {
- m["optimize_size"] = value
- }
-}
-
-// EncodeJpegChromaDownsampling sets the optional chroma_downsampling attribute to value.
+// Computes the inverse 2-dimensional discrete Fourier transform over the
+// inner-most 2 dimensions of `input`.
//
-// value: See http://en.wikipedia.org/wiki/Chroma_subsampling.
-// If not specified, defaults to true
-func EncodeJpegChromaDownsampling(value bool) EncodeJpegAttr {
- return func(m optionalAttr) {
- m["chroma_downsampling"] = value
- }
-}
-
-// EncodeJpegDensityUnit sets the optional density_unit attribute to value.
+// Arguments:
+// input: A complex64 tensor.
//
-// value: Unit used to specify `x_density` and `y_density`:
-// pixels per inch (`'in'`) or centimeter (`'cm'`).
-// If not specified, defaults to "in"
-func EncodeJpegDensityUnit(value string) EncodeJpegAttr {
- return func(m optionalAttr) {
- m["density_unit"] = value
+// Returns A complex64 tensor of the same shape as `input`. The inner-most 2
+// dimensions of `input` are replaced with their inverse 2D Fourier transform.
+//
+// @compatibility(numpy)
+// Equivalent to np.fft.ifft2
+// @end_compatibility
+func IFFT2D(scope *Scope, input tf.Output) (output tf.Output) {
+ if scope.Err() != nil {
+ return
}
+ opspec := tf.OpSpec{
+ Type: "IFFT2D",
+ Input: []tf.Input{
+ input,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// EncodeJpegXDensity sets the optional x_density attribute to value.
+// Creates a tensor filled with a scalar value.
//
-// value: Horizontal pixels per density unit.
-// If not specified, defaults to 300
-func EncodeJpegXDensity(value int64) EncodeJpegAttr {
- return func(m optionalAttr) {
- m["x_density"] = value
+// This operation creates a tensor of shape `dims` and fills it with `value`.
+//
+// For example:
+//
+// ```
+// # Output tensor has shape [2, 3].
+// fill([2, 3], 9) ==> [[9, 9, 9],
+// [9, 9, 9]]
+// ```
+//
+// Arguments:
+// dims: 1-D. Represents the shape of the output tensor.
+// value: 0-D (scalar). Value to fill the returned tensor.
+//
+// @compatibility(numpy)
+// Equivalent to np.full
+// @end_compatibility
+func Fill(scope *Scope, dims tf.Output, value tf.Output) (output tf.Output) {
+ if scope.Err() != nil {
+ return
}
+ opspec := tf.OpSpec{
+ Type: "Fill",
+ Input: []tf.Input{
+ dims, value,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
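
The docstring example maps directly onto the Go API. An editor's sketch (not part of the generated file, `must` is our own helper):

```go
package main

import (
	"fmt"
	"log"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

// must is a local convenience helper, not part of the TensorFlow API.
func must(err error) {
	if err != nil {
		log.Fatal(err)
	}
}

func main() {
	s := op.NewScope()
	dims := op.Const(s, []int32{2, 3})  // output shape
	value := op.Const(s, int32(9))      // scalar fill value
	filled := op.Fill(s, dims, value)

	graph, err := s.Finalize()
	must(err)
	sess, err := tf.NewSession(graph, nil)
	must(err)
	defer sess.Close()

	out, err := sess.Run(nil, []tf.Output{filled}, nil)
	must(err)
	fmt.Println(out[0].Value()) // expected: [[9 9 9] [9 9 9]]
}
```
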
-// EncodeJpegYDensity sets the optional y_density attribute to value.
+// 2D fast Fourier transform.
//
-// value: Vertical pixels per density unit.
-// If not specified, defaults to 300
-func EncodeJpegYDensity(value int64) EncodeJpegAttr {
- return func(m optionalAttr) {
- m["y_density"] = value
+// Computes the 2-dimensional discrete Fourier transform over the inner-most
+// 2 dimensions of `input`.
+//
+// Arguments:
+// input: A complex64 tensor.
+//
+// Returns A complex64 tensor of the same shape as `input`. The inner-most 2
+// dimensions of `input` are replaced with their 2D Fourier transform.
+//
+// @compatibility(numpy)
+// Equivalent to np.fft.fft2
+// @end_compatibility
+func FFT2D(scope *Scope, input tf.Output) (output tf.Output) {
+ if scope.Err() != nil {
+ return
}
+ opspec := tf.OpSpec{
+ Type: "FFT2D",
+ Input: []tf.Input{
+ input,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
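
Since both `FFT2D` and its inverse `IFFT2D` appear in this file, a round trip makes a compact check. An editor's sketch (not part of the generated file): a 2x2 unit impulse transforms to an all-ones spectrum and back; `must` is our own helper.

```go
package main

import (
	"fmt"
	"log"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

// must is a local convenience helper, not part of the TensorFlow API.
func must(err error) {
	if err != nil {
		log.Fatal(err)
	}
}

func main() {
	s := op.NewScope()
	x := op.Const(s, [][]complex64{{1, 0}, {0, 0}}) // a 2x2 unit impulse
	freq := op.FFT2D(s, x)     // impulse transforms to an all-ones spectrum
	back := op.IFFT2D(s, freq) // inverse transform recovers x (up to rounding)

	graph, err := s.Finalize()
	must(err)
	sess, err := tf.NewSession(graph, nil)
	must(err)
	defer sess.Close()

	out, err := sess.Run(nil, []tf.Output{freq, back}, nil)
	must(err)
	fmt.Println(out[0].Value()) // expected: [[(1+0i) (1+0i)] [(1+0i) (1+0i)]]
	fmt.Println(out[1].Value()) // expected: [[(1+0i) (0+0i)] [(0+0i) (0+0i)]]
}
```
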
-// EncodeJpegXmpMetadata sets the optional xmp_metadata attribute to value.
+// ResourceApplyProximalGradientDescentAttr is an optional argument to ResourceApplyProximalGradientDescent.
+type ResourceApplyProximalGradientDescentAttr func(optionalAttr)
+
+// ResourceApplyProximalGradientDescentUseLocking sets the optional use_locking attribute to value.
//
-// value: If not empty, embed this XMP metadata in the image header.
-// If not specified, defaults to ""
-func EncodeJpegXmpMetadata(value string) EncodeJpegAttr {
+// value: If True, the subtraction will be protected by a lock;
+// otherwise the behavior is undefined, but may exhibit less contention.
+// If not specified, defaults to false
+func ResourceApplyProximalGradientDescentUseLocking(value bool) ResourceApplyProximalGradientDescentAttr {
return func(m optionalAttr) {
- m["xmp_metadata"] = value
+ m["use_locking"] = value
}
}
-// JPEG-encode an image.
-//
-// `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`.
-//
-// The attr `format` can be used to override the color format of the encoded
-// output. Values can be:
-//
-// * `''`: Use a default format based on the number of channels in the image.
-// * `grayscale`: Output a grayscale JPEG image. The `channels` dimension
-// of `image` must be 1.
-// * `rgb`: Output an RGB JPEG image. The `channels` dimension
-// of `image` must be 3.
-//
-// If `format` is not specified or is the empty string, a default format is picked
-// in function of the number of channels in `image`:
+// Update '*var' as FOBOS algorithm with fixed learning rate.
//
-// * 1: Output a grayscale image.
-// * 3: Output an RGB image.
+// prox_v = var - alpha * delta
+// var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
//
// Arguments:
-// image: 3-D with shape `[height, width, channels]`.
+// var_: Should be from a Variable().
+// alpha: Scaling factor. Must be a scalar.
+// l1: L1 regularization. Must be a scalar.
+// l2: L2 regularization. Must be a scalar.
+// delta: The change.
//
-// Returns 0-D. JPEG-encoded image.
-func EncodeJpeg(scope *Scope, image tf.Output, optional ...EncodeJpegAttr) (contents tf.Output) {
+// Returns the created operation.
+func ResourceApplyProximalGradientDescent(scope *Scope, var_ tf.Output, alpha tf.Output, l1 tf.Output, l2 tf.Output, delta tf.Output, optional ...ResourceApplyProximalGradientDescentAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
@@ -9106,194 +9415,165 @@ func EncodeJpeg(scope *Scope, image tf.Output, optional ...EncodeJpegAttr) (cont
a(attrs)
}
opspec := tf.OpSpec{
- Type: "EncodeJpeg",
+ Type: "ResourceApplyProximalGradientDescent",
Input: []tf.Input{
- image,
+ var_, alpha, l1, l2, delta,
},
Attrs: attrs,
}
+ return scope.AddOperation(opspec)
+}
+
+// Computes the gradient for the sqrt of `x` wrt its input.
+//
+// Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and `dy`
+// is the corresponding input gradient.
+func SqrtGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "SqrtGrad",
+ Input: []tf.Input{
+ y, dy,
+ },
+ }
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Gradients for batch normalization.
-//
-// DEPRECATED at GraphDef version 9: Use tf.nn.batch_normalization()
-//
-// This op is deprecated. See `tf.nn.batch_normalization`.
+// Get the value of the tensor specified by its handle.
//
// Arguments:
-// t: A 4D input Tensor.
-// m: A 1D mean Tensor with size matching the last dimension of t.
-// This is the first output from tf.nn.moments,
-// or a saved moving average thereof.
-// v: A 1D variance Tensor with size matching the last dimension of t.
-// This is the second output from tf.nn.moments,
-// or a saved moving average thereof.
-// gamma: A 1D gamma Tensor with size matching the last dimension of t.
-// If "scale_after_normalization" is true, this Tensor will be multiplied
-// with the normalized Tensor.
-// backprop: 4D backprop Tensor.
-// variance_epsilon: A small float number to avoid dividing by 0.
-// scale_after_normalization: A bool indicating whether the resulted tensor
-// needs to be multiplied with gamma.
+// handle: The handle for a tensor stored in the session state.
+// dtype: The type of the output value.
//
-// Returns 4D backprop tensor for input.1D backprop tensor for mean.1D backprop tensor for variance.1D backprop tensor for beta.1D backprop tensor for gamma.
-func BatchNormWithGlobalNormalizationGrad(scope *Scope, t tf.Output, m tf.Output, v tf.Output, gamma tf.Output, backprop tf.Output, variance_epsilon float32, scale_after_normalization bool) (dx tf.Output, dm tf.Output, dv tf.Output, db tf.Output, dg tf.Output) {
+// Returns The tensor for the given handle.
+func GetSessionTensor(scope *Scope, handle tf.Output, dtype tf.DataType) (value tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"variance_epsilon": variance_epsilon, "scale_after_normalization": scale_after_normalization}
+ attrs := map[string]interface{}{"dtype": dtype}
opspec := tf.OpSpec{
- Type: "BatchNormWithGlobalNormalizationGrad",
+ Type: "GetSessionTensor",
Input: []tf.Input{
- t, m, v, gamma, backprop,
+ handle,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
-}
-
-// FusedBatchNormV2Attr is an optional argument to FusedBatchNormV2.
-type FusedBatchNormV2Attr func(optionalAttr)
-
-// FusedBatchNormV2Epsilon sets the optional epsilon attribute to value.
-//
-// value: A small float number added to the variance of x.
-// If not specified, defaults to 0.0001
-func FusedBatchNormV2Epsilon(value float32) FusedBatchNormV2Attr {
- return func(m optionalAttr) {
- m["epsilon"] = value
- }
+ return op.Output(0)
}
-// FusedBatchNormV2DataFormat sets the optional data_format attribute to value.
+// Returns x - y element-wise.
//
-// value: The data format for x and y. Either "NHWC" (default) or "NCHW".
-// If not specified, defaults to "NHWC"
-func FusedBatchNormV2DataFormat(value string) FusedBatchNormV2Attr {
- return func(m optionalAttr) {
- m["data_format"] = value
+// *NOTE*: `Subtract` supports broadcasting. More about broadcasting
+// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+func Sub(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
+ if scope.Err() != nil {
+ return
}
-}
-
-// FusedBatchNormV2IsTraining sets the optional is_training attribute to value.
-//
-// value: A bool value to indicate the operation is for training (default)
-// or inference.
-// If not specified, defaults to true
-func FusedBatchNormV2IsTraining(value bool) FusedBatchNormV2Attr {
- return func(m optionalAttr) {
- m["is_training"] = value
+ opspec := tf.OpSpec{
+ Type: "Sub",
+ Input: []tf.Input{
+ x, y,
+ },
}
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
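
A brief editor's sketch of the broadcasting noted above (not part of the generated file): a 1-D `y` is subtracted from every row of a 2-D `x`; `must` is our own helper.

```go
package main

import (
	"fmt"
	"log"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

// must is a local convenience helper, not part of the TensorFlow API.
func must(err error) {
	if err != nil {
		log.Fatal(err)
	}
}

func main() {
	s := op.NewScope()
	x := op.Const(s, [][]float32{{1, 2}, {3, 4}})
	y := op.Const(s, []float32{1, 1}) // broadcast across the rows of x
	z := op.Sub(s, x, y)

	graph, err := s.Finalize()
	must(err)
	sess, err := tf.NewSession(graph, nil)
	must(err)
	defer sess.Close()

	out, err := sess.Run(nil, []tf.Output{z}, nil)
	must(err)
	fmt.Println(out[0].Value()) // expected: [[0 1] [2 3]]
}
```
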
-// Batch normalization.
+// Computes softmax cross entropy cost and gradients to backpropagate.
//
-// Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW".
-// The size of 1D Tensors matches the dimension C of the 4D Tensors.
+// Inputs are the logits, not probabilities.
//
// Arguments:
-// x: A 4D Tensor for input data.
-// scale: A 1D Tensor for scaling factor, to scale the normalized x.
-// offset: A 1D Tensor for offset, to shift to the normalized x.
-// mean: A 1D Tensor for population mean. Used for inference only;
-// must be empty for training.
-// variance: A 1D Tensor for population variance. Used for inference only;
-// must be empty for training.
+// features: batch_size x num_classes matrix
+// labels: batch_size x num_classes matrix
+// The caller must ensure that each batch of labels represents a valid
+// probability distribution.
//
-// Returns A 4D Tensor for output data.A 1D Tensor for the computed batch mean, to be used by TensorFlow
-// to compute the running mean.A 1D Tensor for the computed batch variance, to be used by
-// TensorFlow to compute the running variance.A 1D Tensor for the computed batch mean, to be reused
-// in the gradient computation.A 1D Tensor for the computed batch variance (inverted variance
-// in the cuDNN case), to be reused in the gradient computation.
-func FusedBatchNormV2(scope *Scope, x tf.Output, scale tf.Output, offset tf.Output, mean tf.Output, variance tf.Output, optional ...FusedBatchNormV2Attr) (y tf.Output, batch_mean tf.Output, batch_variance tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output) {
+// Returns Per-example loss (a batch_size vector) and backpropagated gradients (a batch_size x num_classes matrix).
+func SoftmaxCrossEntropyWithLogits(scope *Scope, features tf.Output, labels tf.Output) (loss tf.Output, backprop tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "FusedBatchNormV2",
+ Type: "SoftmaxCrossEntropyWithLogits",
Input: []tf.Input{
- x, scale, offset, mean, variance,
+ features, labels,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
+ return op.Output(0), op.Output(1)
}
-// Conv2DBackpropInputAttr is an optional argument to Conv2DBackpropInput.
-type Conv2DBackpropInputAttr func(optionalAttr)
-
-// Conv2DBackpropInputUseCudnnOnGpu sets the optional use_cudnn_on_gpu attribute to value.
-// If not specified, defaults to true
-func Conv2DBackpropInputUseCudnnOnGpu(value bool) Conv2DBackpropInputAttr {
- return func(m optionalAttr) {
- m["use_cudnn_on_gpu"] = value
- }
-}
+// ReduceJoinAttr is an optional argument to ReduceJoin.
+type ReduceJoinAttr func(optionalAttr)
-// Conv2DBackpropInputDataFormat sets the optional data_format attribute to value.
+// ReduceJoinKeepDims sets the optional keep_dims attribute to value.
//
-// value: Specify the data format of the input and output data. With the
-// default format "NHWC", the data is stored in the order of:
-// [batch, in_height, in_width, in_channels].
-// Alternatively, the format could be "NCHW", the data storage order of:
-// [batch, in_channels, in_height, in_width].
-// If not specified, defaults to "NHWC"
-func Conv2DBackpropInputDataFormat(value string) Conv2DBackpropInputAttr {
+// value: If `True`, retain reduced dimensions with length `1`.
+// If not specified, defaults to false
+func ReduceJoinKeepDims(value bool) ReduceJoinAttr {
return func(m optionalAttr) {
- m["data_format"] = value
+ m["keep_dims"] = value
}
}
-// Conv2DBackpropInputDilations sets the optional dilations attribute to value.
+// ReduceJoinSeparator sets the optional separator attribute to value.
//
-// value: 1-D tensor of length 4. The dilation factor for each dimension of
-// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
-// element on that dimension. The dimension order is determined by the value of
-// `data_format`, see above for details. Dilations in the batch and depth
-// dimensions must be 1.
-// If not specified, defaults to <i:1 i:1 i:1 i:1 >
-func Conv2DBackpropInputDilations(value []int64) Conv2DBackpropInputAttr {
+// value: The separator to use when joining.
+// If not specified, defaults to ""
+func ReduceJoinSeparator(value string) ReduceJoinAttr {
return func(m optionalAttr) {
- m["dilations"] = value
+ m["separator"] = value
}
}
-// Computes the gradients of convolution with respect to the input.
+// Joins a string Tensor across the given dimensions.
+//
+// Computes the string join across dimensions in the given string Tensor of shape
+// `[d_0, d_1, ..., d_n-1]`. Returns a new Tensor created by joining the input
+// strings with the given separator (default: empty string). Negative indices are
+// counted backwards from the end, with `-1` being equivalent to `n - 1`.
+//
+// For example:
+//
+// ```python
+// # tensor `a` is [["a", "b"], ["c", "d"]]
+// tf.reduce_join(a, 0) ==> ["ac", "bd"]
+// tf.reduce_join(a, 1) ==> ["ab", "cd"]
+// tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"]
+// tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"]
+// tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]]
+// tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]]
+// tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"]
+// tf.reduce_join(a, [0, 1]) ==> ["acbd"]
+// tf.reduce_join(a, [1, 0]) ==> ["abcd"]
+// tf.reduce_join(a, []) ==> ["abcd"]
+// ```
//
// Arguments:
-// input_sizes: An integer vector representing the shape of `input`,
-// where `input` is a 4-D `[batch, height, width, channels]` tensor.
-// filter: 4-D with shape
-// `[filter_height, filter_width, in_channels, out_channels]`.
-// out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`.
-// Gradients w.r.t. the output of the convolution.
-// strides: The stride of the sliding window for each dimension of the input
-// of the convolution. Must be in the same order as the dimension specified with
-// format.
-// padding: The type of padding algorithm to use.
+// inputs: The input to be joined. All reduced indices must have non-zero size.
+// reduction_indices: The dimensions to reduce over. Dimensions are reduced in the
+// order specified. Omitting `reduction_indices` is equivalent to passing
+// `[n-1, n-2, ..., 0]`. Negative indices from `-n` to `-1` are supported.
//
-// Returns 4-D with shape `[batch, in_height, in_width, in_channels]`. Gradient
-// w.r.t. the input of the convolution.
-func Conv2DBackpropInput(scope *Scope, input_sizes tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv2DBackpropInputAttr) (output tf.Output) {
+// Returns Has shape equal to that of the input with reduced dimensions removed or
+// set to `1` depending on `keep_dims`.
+func ReduceJoin(scope *Scope, inputs tf.Output, reduction_indices tf.Output, optional ...ReduceJoinAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"strides": strides, "padding": padding}
+ attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "Conv2DBackpropInput",
+ Type: "ReduceJoin",
Input: []tf.Input{
- input_sizes, filter, out_backprop,
+ inputs, reduction_indices,
},
Attrs: attrs,
}
@@ -9301,60 +9581,78 @@ func Conv2DBackpropInput(scope *Scope, input_sizes tf.Output, filter tf.Output,
return op.Output(0)
}
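
The first docstring example above, as an editor's sketch against the Go bindings (not part of the generated file; `must` is our own helper), including the optional separator attribute:

```go
package main

import (
	"fmt"
	"log"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

// must is a local convenience helper, not part of the TensorFlow API.
func must(err error) {
	if err != nil {
		log.Fatal(err)
	}
}

func main() {
	s := op.NewScope()
	a := op.Const(s, [][]string{{"a", "b"}, {"c", "d"}})
	axis := op.Const(s, int32(0)) // join down the rows
	joined := op.ReduceJoin(s, a, axis, op.ReduceJoinSeparator("."))

	graph, err := s.Finalize()
	must(err)
	sess, err := tf.NewSession(graph, nil)
	must(err)
	defer sess.Close()

	out, err := sess.Run(nil, []tf.Output{joined}, nil)
	must(err)
	fmt.Println(out[0].Value()) // expected: [a.c b.d]
}
```
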
-// FusedBatchNormAttr is an optional argument to FusedBatchNorm.
-type FusedBatchNormAttr func(optionalAttr)
+// Computes cos of x element-wise.
+func Cos(scope *Scope, x tf.Output) (y tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "Cos",
+ Input: []tf.Input{
+ x,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
-// FusedBatchNormEpsilon sets the optional epsilon attribute to value.
+// FusedBatchNormGradAttr is an optional argument to FusedBatchNormGrad.
+type FusedBatchNormGradAttr func(optionalAttr)
+
+// FusedBatchNormGradEpsilon sets the optional epsilon attribute to value.
//
// value: A small float number added to the variance of x.
// If not specified, defaults to 0.0001
-func FusedBatchNormEpsilon(value float32) FusedBatchNormAttr {
+func FusedBatchNormGradEpsilon(value float32) FusedBatchNormGradAttr {
return func(m optionalAttr) {
m["epsilon"] = value
}
}
-// FusedBatchNormDataFormat sets the optional data_format attribute to value.
+// FusedBatchNormGradDataFormat sets the optional data_format attribute to value.
//
-// value: The data format for x and y. Either "NHWC" (default) or "NCHW".
+// value: The data format for y_backprop, x, x_backprop.
+// Either "NHWC" (default) or "NCHW".
// If not specified, defaults to "NHWC"
-func FusedBatchNormDataFormat(value string) FusedBatchNormAttr {
+func FusedBatchNormGradDataFormat(value string) FusedBatchNormGradAttr {
return func(m optionalAttr) {
m["data_format"] = value
}
}
-// FusedBatchNormIsTraining sets the optional is_training attribute to value.
+// FusedBatchNormGradIsTraining sets the optional is_training attribute to value.
//
// value: A bool value to indicate the operation is for training (default)
// or inference.
// If not specified, defaults to true
-func FusedBatchNormIsTraining(value bool) FusedBatchNormAttr {
+func FusedBatchNormGradIsTraining(value bool) FusedBatchNormGradAttr {
return func(m optionalAttr) {
m["is_training"] = value
}
}
-// Batch normalization.
+// Gradient for batch normalization.
//
// Note that the sizes of the 4D Tensors are defined by either "NHWC" or "NCHW".
// The size of 1D Tensors matches the dimension C of the 4D Tensors.
//
// Arguments:
+// y_backprop: A 4D Tensor for the gradient with respect to y.
// x: A 4D Tensor for input data.
// scale: A 1D Tensor for scaling factor, to scale the normalized x.
-// offset: A 1D Tensor for offset, to shift to the normalized x.
-// mean: A 1D Tensor for population mean. Used for inference only;
-// must be empty for training.
-// variance: A 1D Tensor for population variance. Used for inference only;
-// must be empty for training.
+// reserve_space_1: When is_training is True, a 1D Tensor for the computed batch
+// mean to be reused in gradient computation. When is_training is
+// False, a 1D Tensor for the population mean to be reused in both
+// 1st and 2nd order gradient computation.
+// reserve_space_2: When is_training is True, a 1D Tensor for the computed batch
+// variance (inverted variance in the cuDNN case) to be reused in
+// gradient computation. When is_training is False, a 1D Tensor
+// for the population variance to be reused in both 1st and 2nd
+// order gradient computation.
//
-// Returns A 4D Tensor for output data.A 1D Tensor for the computed batch mean, to be used by TensorFlow
-// to compute the running mean.A 1D Tensor for the computed batch variance, to be used by
-// TensorFlow to compute the running variance.A 1D Tensor for the computed batch mean, to be reused
-// in the gradient computation.A 1D Tensor for the computed batch variance (inverted variance
-// in the cuDNN case), to be reused in the gradient computation.
-func FusedBatchNorm(scope *Scope, x tf.Output, scale tf.Output, offset tf.Output, mean tf.Output, variance tf.Output, optional ...FusedBatchNormAttr) (y tf.Output, batch_mean tf.Output, batch_variance tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output) {
+// Returns A 4D Tensor for the gradient with respect to x. A 1D Tensor for the
+// gradient with respect to scale. A 1D Tensor for the gradient with respect to
+// offset. An unused placeholder to match the mean input in FusedBatchNorm. An
+// unused placeholder to match the variance input in FusedBatchNorm.
+func FusedBatchNormGrad(scope *Scope, y_backprop tf.Output, x tf.Output, scale tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output, optional ...FusedBatchNormGradAttr) (x_backprop tf.Output, scale_backprop tf.Output, offset_backprop tf.Output, reserve_space_3 tf.Output, reserve_space_4 tf.Output) {
if scope.Err() != nil {
return
}
@@ -9363,9 +9661,9 @@ func FusedBatchNorm(scope *Scope, x tf.Output, scale tf.Output, offset tf.Output
a(attrs)
}
opspec := tf.OpSpec{
- Type: "FusedBatchNorm",
+ Type: "FusedBatchNormGrad",
Input: []tf.Input{
- x, scale, offset, mean, variance,
+ y_backprop, x, scale, reserve_space_1, reserve_space_2,
},
Attrs: attrs,
}
@@ -9373,576 +9671,525 @@ func FusedBatchNorm(scope *Scope, x tf.Output, scale tf.Output, offset tf.Output
return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
}
-// RandomStandardNormalAttr is an optional argument to RandomStandardNormal.
-type RandomStandardNormalAttr func(optionalAttr)
+// TopKAttr is an optional argument to TopK.
+type TopKAttr func(optionalAttr)
-// RandomStandardNormalSeed sets the optional seed attribute to value.
+// TopKSorted sets the optional sorted attribute to value.
//
-// value: If either `seed` or `seed2` are set to be non-zero, the random number
-// generator is seeded by the given seed. Otherwise, it is seeded by a
-// random seed.
-// If not specified, defaults to 0
-func RandomStandardNormalSeed(value int64) RandomStandardNormalAttr {
+// value: If true the resulting `k` elements will be sorted by the values in
+// descending order.
+// If not specified, defaults to true
+func TopKSorted(value bool) TopKAttr {
return func(m optionalAttr) {
- m["seed"] = value
+ m["sorted"] = value
}
}
-// RandomStandardNormalSeed2 sets the optional seed2 attribute to value.
+// Finds values and indices of the `k` largest elements for the last dimension.
//
-// value: A second seed to avoid seed collision.
-// If not specified, defaults to 0
-func RandomStandardNormalSeed2(value int64) RandomStandardNormalAttr {
- return func(m optionalAttr) {
- m["seed2"] = value
- }
-}
-
-// Outputs random values from a normal distribution.
+// DEPRECATED at GraphDef version 7: Use TopKV2 instead
//
-// The generated values will have mean 0 and standard deviation 1.
+// If the input is a vector (rank-1), finds the `k` largest entries in the vector
+// and outputs their values and indices as vectors. Thus `values[j]` is the
+// `j`-th largest entry in `input`, and its index is `indices[j]`.
+//
+// For matrices (resp. higher rank input), computes the top `k` entries in each
+// row (resp. vector along the last dimension). Thus,
+//
+// values.shape = indices.shape = input.shape[:-1] + [k]
+//
+// If two elements are equal, the lower-index element appears first.
+//
+// If `k` varies dynamically, use `TopKV2` below.
//
// Arguments:
-// shape: The shape of the output tensor.
-// dtype: The type of the output.
+// input: 1-D or higher with last dimension at least `k`.
+// k: Number of top elements to look for along the last dimension (along each
+// row for matrices).
//
-// Returns A tensor of the specified shape filled with random normal values.
-func RandomStandardNormal(scope *Scope, shape tf.Output, dtype tf.DataType, optional ...RandomStandardNormalAttr) (output tf.Output) {
+// Returns The `k` largest elements along each last dimensional slice. The
+// indices of `values` within the last dimension of `input`.
+func TopK(scope *Scope, input tf.Output, k int64, optional ...TopKAttr) (values tf.Output, indices tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dtype": dtype}
+ attrs := map[string]interface{}{"k": k}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "RandomStandardNormal",
+ Type: "TopK",
Input: []tf.Input{
- shape,
+ input,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1)
}
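// Example: using the TopK wrapper above (an illustrative sketch, not part of
// the generated file; it assumes the TensorFlow Go bindings are installed).
// Because `k` is a graph-construction-time attribute here, TopKV2 is the
// choice when `k` is only known at run time.

package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	input := op.Const(s, []float32{1, 5, 3, 2, 4})
	values, indices := op.TopK(s, input, 3)

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()

	out, err := sess.Run(nil, []tf.Output{values, indices}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].Value()) // [5 4 3]
	fmt.Println(out[1].Value()) // [1 4 2]
}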
-// Computes sigmoid of `x` element-wise.
+// Transforms a Tensor into a serialized TensorProto proto.
//
-// Specifically, `y = 1 / (1 + exp(-x))`.
-func Sigmoid(scope *Scope, x tf.Output) (y tf.Output) {
+// Arguments:
+// tensor: A Tensor of type `T`.
+//
+// Returns A serialized TensorProto proto of the input tensor.
+func SerializeTensor(scope *Scope, tensor tf.Output) (serialized tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Sigmoid",
+ Type: "SerializeTensor",
Input: []tf.Input{
- x,
+ tensor,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// ComputeAccidentalHitsAttr is an optional argument to ComputeAccidentalHits.
-type ComputeAccidentalHitsAttr func(optionalAttr)
-
-// ComputeAccidentalHitsSeed sets the optional seed attribute to value.
-//
-// value: If either seed or seed2 are set to be non-zero, the random number
-// generator is seeded by the given seed. Otherwise, it is seeded by a
-// random seed.
-// If not specified, defaults to 0
-func ComputeAccidentalHitsSeed(value int64) ComputeAccidentalHitsAttr {
- return func(m optionalAttr) {
- m["seed"] = value
- }
-}
+// MatrixSolveAttr is an optional argument to MatrixSolve.
+type MatrixSolveAttr func(optionalAttr)
-// ComputeAccidentalHitsSeed2 sets the optional seed2 attribute to value.
+// MatrixSolveAdjoint sets the optional adjoint attribute to value.
//
-// value: An second seed to avoid seed collision.
-// If not specified, defaults to 0
-func ComputeAccidentalHitsSeed2(value int64) ComputeAccidentalHitsAttr {
+// value: Boolean indicating whether to solve with `matrix` or its (block-wise)
+// adjoint.
+// If not specified, defaults to false
+func MatrixSolveAdjoint(value bool) MatrixSolveAttr {
return func(m optionalAttr) {
- m["seed2"] = value
+ m["adjoint"] = value
}
}
-// Computes the ids of the positions in sampled_candidates that match true_labels.
+// Solves systems of linear equations.
//
-// When doing log-odds NCE, the result of this op should be passed through a
-// SparseToDense op, then added to the logits of the sampled candidates. This has
-// the effect of 'removing' the sampled labels that match the true labels by
-// making the classifier sure that they are sampled labels.
+// `Matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
+// form square matrices. `Rhs` is a tensor of shape `[..., M, K]`. The `output` is
+// a tensor of shape `[..., M, K]`. If `adjoint` is `False` then each output matrix
+// satisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
+// If `adjoint` is `True` then each output matrix satisfies
+// `adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`.
//
// Arguments:
-// true_classes: The true_classes output of UnpackSparseLabels.
-// sampled_candidates: The sampled_candidates output of CandidateSampler.
-// num_true: Number of true labels per context.
+// matrix: Shape is `[..., M, M]`.
+// rhs: Shape is `[..., M, K]`.
//
-// Returns A vector of indices corresponding to rows of true_candidates.A vector of IDs of positions in sampled_candidates that match a true_label
-// for the row with the corresponding index in indices.A vector of the same length as indices and ids, in which each element
-// is -FLOAT_MAX.
-func ComputeAccidentalHits(scope *Scope, true_classes tf.Output, sampled_candidates tf.Output, num_true int64, optional ...ComputeAccidentalHitsAttr) (indices tf.Output, ids tf.Output, weights tf.Output) {
+// Returns Shape is `[..., M, K]`.
+func MatrixSolve(scope *Scope, matrix tf.Output, rhs tf.Output, optional ...MatrixSolveAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"num_true": num_true}
+ attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "ComputeAccidentalHits",
+ Type: "MatrixSolve",
Input: []tf.Input{
- true_classes, sampled_candidates,
+ matrix, rhs,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
+ return op.Output(0)
}
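// Example: solving a small linear system with MatrixSolve (an illustrative
// sketch, not part of the generated file; assumes the TensorFlow Go
// bindings). The system is A*x = b with A = [[3, 1], [1, 2]] and b = [9, 8]^T,
// whose exact solution is x = [2, 3]^T.

package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	a := op.Const(s.SubScope("matrix"), [][]float32{{3, 1}, {1, 2}})
	b := op.Const(s.SubScope("rhs"), [][]float32{{9}, {8}})
	// Pass op.MatrixSolveAdjoint(true) to solve with the adjoint of A instead.
	x := op.MatrixSolve(s, a, b)

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()

	out, err := sess.Run(nil, []tf.Output{x}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].Value()) // [[2] [3]]
}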
-// StageClearAttr is an optional argument to StageClear.
-type StageClearAttr func(optionalAttr)
-
-// StageClearCapacity sets the optional capacity attribute to value.
-// If not specified, defaults to 0
+// Looks up keys in a table, outputs the corresponding values.
//
-// REQUIRES: value >= 0
-func StageClearCapacity(value int64) StageClearAttr {
- return func(m optionalAttr) {
- m["capacity"] = value
- }
-}
-
-// StageClearMemoryLimit sets the optional memory_limit attribute to value.
-// If not specified, defaults to 0
+// The tensor `keys` must be of the same type as the keys of the table.
+// The output `values` is of the type of the table values.
//
-// REQUIRES: value >= 0
-func StageClearMemoryLimit(value int64) StageClearAttr {
- return func(m optionalAttr) {
- m["memory_limit"] = value
- }
-}
-
-// StageClearContainer sets the optional container attribute to value.
-// If not specified, defaults to ""
-func StageClearContainer(value string) StageClearAttr {
- return func(m optionalAttr) {
- m["container"] = value
- }
-}
-
-// StageClearSharedName sets the optional shared_name attribute to value.
-// If not specified, defaults to ""
-func StageClearSharedName(value string) StageClearAttr {
- return func(m optionalAttr) {
- m["shared_name"] = value
- }
-}
-
-// Op removes all elements in the underlying container.
+// The scalar `default_value` is the value output for keys not present in the
+// table. It must also be of the same type as the table values.
//
-// Returns the created operation.
-func StageClear(scope *Scope, dtypes []tf.DataType, optional ...StageClearAttr) (o *tf.Operation) {
+// Arguments:
+// table_handle: Handle to the table.
+// keys: Any shape. Keys to look up.
+// default_value: The scalar value to output for keys not present in the table.
+//
+// Returns Same shape as `keys`. Values found in the table, or `default_value`
+// for missing keys.
+func LookupTableFindV2(scope *Scope, table_handle tf.Output, keys tf.Output, default_value tf.Output) (values tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dtypes": dtypes}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "StageClear",
-
- Attrs: attrs,
+ Type: "LookupTableFindV2",
+ Input: []tf.Input{
+ table_handle, keys, default_value,
+ },
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
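// Example: building and querying a string-to-int64 table with
// LookupTableFindV2 (an illustrative sketch, not part of the generated file).
// It assumes the HashTableV2 and LookupTableImportV2 wrappers from this same
// package to create and populate the table before the lookup runs.

package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	table := op.HashTableV2(s, tf.String, tf.Int64)
	initOp := op.LookupTableImportV2(s, table,
		op.Const(s.SubScope("keys"), []string{"apple", "banana"}),
		op.Const(s.SubScope("vals"), []int64{1, 2}))
	query := op.Const(s.SubScope("query"), []string{"banana", "cherry"})
	def := op.Const(s.SubScope("default"), int64(-1))
	found := op.LookupTableFindV2(s, table, query, def)

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()

	// Populate the table first, then run the lookup.
	if _, err := sess.Run(nil, nil, []*tf.Operation{initOp}); err != nil {
		panic(err)
	}
	out, err := sess.Run(nil, []tf.Output{found}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].Value()) // [2 -1] ("cherry" falls back to default_value)
}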
-// AvgPoolGradAttr is an optional argument to AvgPoolGrad.
-type AvgPoolGradAttr func(optionalAttr)
-
-// AvgPoolGradDataFormat sets the optional data_format attribute to value.
+// Inverse 3D fast Fourier transform.
//
-// value: Specify the data format of the input and output data. With the
-// default format "NHWC", the data is stored in the order of:
-// [batch, in_height, in_width, in_channels].
-// Alternatively, the format could be "NCHW", the data storage order of:
-// [batch, in_channels, in_height, in_width].
-// If not specified, defaults to "NHWC"
-func AvgPoolGradDataFormat(value string) AvgPoolGradAttr {
- return func(m optionalAttr) {
- m["data_format"] = value
- }
-}
-
-// Computes gradients of the average pooling function.
+// Computes the inverse 3-dimensional discrete Fourier transform over the
+// inner-most 3 dimensions of `input`.
//
// Arguments:
-// orig_input_shape: 1-D. Shape of the original input to `avg_pool`.
-// grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t.
-// the output of `avg_pool`.
-// ksize: The size of the sliding window for each dimension of the input.
-// strides: The stride of the sliding window for each dimension of the input.
-// padding: The type of padding algorithm to use.
+// input: A complex64 tensor.
//
-// Returns 4-D. Gradients w.r.t. the input of `avg_pool`.
-func AvgPoolGrad(scope *Scope, orig_input_shape tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPoolGradAttr) (output tf.Output) {
+// Returns A complex64 tensor of the same shape as `input`. The inner-most 3
+// dimensions of `input` are replaced with their inverse 3D Fourier transform.
+//
+// @compatibility(numpy)
+// Equivalent to np.fft.ifftn with 3 dimensions.
+// @end_compatibility
+func IFFT3D(scope *Scope, input tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "AvgPoolGrad",
+ Type: "IFFT3D",
Input: []tf.Input{
- orig_input_shape, grad,
+ input,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Computes the maximum along segments of a tensor.
-//
-// Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
-// segments.
-//
-// Computes a tensor such that
-// \\(output_i = \max_j(data_j)\\) where `max` is over `j` such
-// that `segment_ids[j] == i`.
+// Adds `bias` to `value`.
//
-// If the max is empty for a given segment ID `i`, `output[i] = 0`.
+// This is a deprecated version of BiasAdd and will soon be removed.
//
-// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
-// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentMax.png" alt>
-// </div>
+// This is a special case of `tf.add` where `bias` is restricted to be 1-D.
+// Broadcasting is supported, so `value` may have any number of dimensions.
//
// Arguments:
+// value: Any number of dimensions.
+// bias: 1-D with size the last dimension of `value`.
//
-// segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s
-// first dimension. Values should be sorted and can be repeated.
-//
-// Returns Has same shape as data, except for dimension 0 which
-// has size `k`, the number of segments.
-func SegmentMax(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output) {
+// Returns Broadcasted sum of `value` and `bias`.
+func BiasAddV1(scope *Scope, value tf.Output, bias tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "SegmentMax",
+ Type: "BiasAddV1",
Input: []tf.Input{
- data, segment_ids,
+ value, bias,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Makes its input available to the next iteration.
+// Reverses specific dimensions of a tensor.
+//
+// NOTE `tf.reverse` has now changed behavior in preparation for 1.0.
+// `tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0.
+//
+// Given a `tensor` and an `int32` tensor `axis` representing the set of
+// dimensions of `tensor` to reverse, this operation reverses each dimension
+// `i` for which there exists `j` s.t. `axis[j] == i`.
+//
+// `tensor` can have up to 8 dimensions. `axis` may contain 0 or more
+// entries. If an index is specified more than once, an InvalidArgument
+// error is raised.
+//
+// For example:
+//
+// ```
+// # tensor 't' is [[[[ 0, 1, 2, 3],
+// # [ 4, 5, 6, 7],
+// # [ 8, 9, 10, 11]],
+// # [[12, 13, 14, 15],
+// # [16, 17, 18, 19],
+// # [20, 21, 22, 23]]]]
+// # tensor 't' shape is [1, 2, 3, 4]
+//
+// # 'dims' is [3] or 'dims' is [-1]
+// reverse(t, dims) ==> [[[[ 3, 2, 1, 0],
+// [ 7, 6, 5, 4],
+// [ 11, 10, 9, 8]],
+// [[15, 14, 13, 12],
+// [19, 18, 17, 16],
+// [23, 22, 21, 20]]]]
+//
+// # 'dims' is '[1]' (or 'dims' is '[-3]')
+// reverse(t, dims) ==> [[[[12, 13, 14, 15],
+// [16, 17, 18, 19],
+// [20, 21, 22, 23]],
+// [[ 0, 1, 2, 3],
+// [ 4, 5, 6, 7],
+// [ 8, 9, 10, 11]]]]
+//
+// # 'dims' is '[2]' (or 'dims' is '[-2]')
+// reverse(t, dims) ==> [[[[8, 9, 10, 11],
+// [4, 5, 6, 7],
+// [0, 1, 2, 3]],
+// [[20, 21, 22, 23],
+// [16, 17, 18, 19],
+// [12, 13, 14, 15]]]]
+// ```
//
// Arguments:
-// data: The tensor to be made available to the next iteration.
+// tensor: Up to 8-D.
+// axis: 1-D. The indices of the dimensions to reverse. Must be in the range
+// `[-rank(tensor), rank(tensor))`.
//
-// Returns The same tensor as `data`.
-func NextIteration(scope *Scope, data tf.Output) (output tf.Output) {
+// Returns The same shape as `tensor`.
+func ReverseV2(scope *Scope, tensor tf.Output, axis tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "NextIteration",
+ Type: "ReverseV2",
Input: []tf.Input{
- data,
+ tensor, axis,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
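// Example: reversing one dimension of a matrix with ReverseV2 (an
// illustrative sketch, not part of the generated file; assumes the
// TensorFlow Go bindings). Negative axis values count from the end, so
// axis = [-1] is equivalent to [1] for a rank-2 tensor.

package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	t := op.Const(s.SubScope("t"), [][]int32{{1, 2, 3}, {4, 5, 6}})
	axis := op.Const(s.SubScope("axis"), []int32{1})
	rev := op.ReverseV2(s, t, axis)

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()

	out, err := sess.Run(nil, []tf.Output{rev}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].Value()) // [[3 2 1] [6 5 4]]
}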
-// Does nothing. Only useful as a placeholder for control edges.
-//
-// Returns the created operation.
-func NoOp(scope *Scope) (o *tf.Operation) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "NoOp",
+// RealAttr is an optional argument to Real.
+type RealAttr func(optionalAttr)
+
+// RealTout sets the optional Tout attribute to value.
+// If not specified, defaults to DT_FLOAT
+func RealTout(value tf.DataType) RealAttr {
+ return func(m optionalAttr) {
+ m["Tout"] = value
}
- return scope.AddOperation(opspec)
}
-// Returns the rank of a tensor.
+// Returns the real part of a complex number.
//
-// This operation returns an integer representing the rank of `input`.
+// Given a tensor `input` of complex numbers, this operation returns a tensor of
+// type `float` that is the real part of each element in `input`. All elements in
+// `input` must be complex numbers of the form \\(a + bj\\), where *a* is the real
+// part returned by this operation and *b* is the imaginary part.
//
// For example:
//
// ```
-// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
-// # shape of tensor 't' is [2, 2, 3]
-// rank(t) ==> 3
+// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
+// tf.real(input) ==> [-2.25, 3.25]
// ```
-//
-// **Note**: The rank of a tensor is not the same as the rank of a matrix. The rank
-// of a tensor is the number of indices required to uniquely select each element
-// of the tensor. Rank is also known as "order", "degree", or "ndims."
-func Rank(scope *Scope, input tf.Output) (output tf.Output) {
+func Real(scope *Scope, input tf.Output, optional ...RealAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "Rank",
+ Type: "Real",
Input: []tf.Input{
input,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// DecodeCSVAttr is an optional argument to DecodeCSV.
-type DecodeCSVAttr func(optionalAttr)
+// AudioSummaryAttr is an optional argument to AudioSummary.
+type AudioSummaryAttr func(optionalAttr)
-// DecodeCSVFieldDelim sets the optional field_delim attribute to value.
+// AudioSummaryMaxOutputs sets the optional max_outputs attribute to value.
//
-// value: char delimiter to separate fields in a record.
-// If not specified, defaults to ","
-func DecodeCSVFieldDelim(value string) DecodeCSVAttr {
- return func(m optionalAttr) {
- m["field_delim"] = value
- }
-}
-
-// DecodeCSVUseQuoteDelim sets the optional use_quote_delim attribute to value.
+// value: Max number of batch elements to generate audio for.
+// If not specified, defaults to 3
//
-// value: If false, treats double quotation marks as regular
-// characters inside of the string fields (ignoring RFC 4180, Section 2,
-// Bullet 5).
-// If not specified, defaults to true
-func DecodeCSVUseQuoteDelim(value bool) DecodeCSVAttr {
+// REQUIRES: value >= 1
+func AudioSummaryMaxOutputs(value int64) AudioSummaryAttr {
return func(m optionalAttr) {
- m["use_quote_delim"] = value
+ m["max_outputs"] = value
}
}
-// DecodeCSVNaValue sets the optional na_value attribute to value.
+// Outputs a `Summary` protocol buffer with audio.
//
-// value: Additional string to recognize as NA/NaN.
-// If not specified, defaults to ""
-func DecodeCSVNaValue(value string) DecodeCSVAttr {
- return func(m optionalAttr) {
- m["na_value"] = value
- }
-}
-
-// Convert CSV records to tensors. Each column maps to one tensor.
+// DEPRECATED at GraphDef version 15: Use AudioSummaryV2.
//
-// RFC 4180 format is expected for the CSV records.
-// (https://tools.ietf.org/html/rfc4180)
-// Note that we allow leading and trailing spaces with int or float field.
+// The summary has up to `max_outputs` summary values containing audio. The
+// audio is built from `tensor` which must be 3-D with shape `[batch_size,
+// frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
+// assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.
+//
+// The `tag` argument is a scalar `Tensor` of type `string`. It is used to
+// build the `tag` of the summary values:
+//
+// * If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
+// * If `max_outputs` is greater than 1, the summary value tags are
+// generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
//
// Arguments:
-// records: Each string is a record/row in the csv and all records should have
-// the same format.
-// record_defaults: One tensor per column of the input record, with either a
-// scalar default value for that column or empty if the column is required.
+// tag: Scalar. Used to build the `tag` attribute of the summary values.
+// tensor: 2-D of shape `[batch_size, frames]`.
+// sample_rate: The sample rate of the signal in hertz.
//
-// Returns Each tensor will have the same shape as records.
-func DecodeCSV(scope *Scope, records tf.Output, record_defaults []tf.Output, optional ...DecodeCSVAttr) (output []tf.Output) {
+// Returns Scalar. Serialized `Summary` protocol buffer.
+func AudioSummary(scope *Scope, tag tf.Output, tensor tf.Output, sample_rate float32, optional ...AudioSummaryAttr) (summary tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"sample_rate": sample_rate}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "DecodeCSV",
+ Type: "AudioSummary",
Input: []tf.Input{
- records, tf.OutputList(record_defaults),
+ tag, tensor,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- if scope.Err() != nil {
- return
- }
- var idx int
- var err error
- if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
- scope.UpdateErr("DecodeCSV", err)
- return
+ return op.Output(0)
+}
+
+// QrAttr is an optional argument to Qr.
+type QrAttr func(optionalAttr)
+
+// QrFullMatrices sets the optional full_matrices attribute to value.
+//
+// value: If true, compute full-sized `q` and `r`. If false
+// (the default), compute only the leading `P` columns of `q`.
+// If not specified, defaults to false
+func QrFullMatrices(value bool) QrAttr {
+ return func(m optionalAttr) {
+ m["full_matrices"] = value
}
- return output
}
-// Transforms a serialized tensorflow.TensorProto proto into a Tensor.
+// Computes the QR decompositions of one or more matrices.
+//
+// Computes the QR decomposition of each inner matrix in `tensor` such that
+// `tensor[..., :, :] = q[..., :, :] * r[..., :, :]`
+//
+// ```python
+// # a is a tensor.
+// # q is a tensor of orthonormal matrices.
+// # r is a tensor of upper triangular matrices.
+// q, r = qr(a)
+// q_full, r_full = qr(a, full_matrices=True)
+// ```
//
// Arguments:
-// serialized: A scalar string containing a serialized TensorProto proto.
-// out_type: The type of the serialized tensor. The provided type must match the
-// type of the serialized tensor and no implicit conversion will take place.
+// input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
+// form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.
//
-// Returns A Tensor of type `out_type`.
-func ParseTensor(scope *Scope, serialized tf.Output, out_type tf.DataType) (output tf.Output) {
+// Returns Orthonormal basis for range of `a`. If `full_matrices` is `False` then
+// shape is `[..., M, P]`; if `full_matrices` is `True` then shape is
+// `[..., M, M]`. Triangular factor. If `full_matrices` is `False` then shape is
+// `[..., P, N]`. If `full_matrices` is `True` then shape is `[..., M, N]`.
+func Qr(scope *Scope, input tf.Output, optional ...QrAttr) (q tf.Output, r tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"out_type": out_type}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "ParseTensor",
+ Type: "Qr",
Input: []tf.Input{
- serialized,
+ input,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1)
}
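// Example: QR-decomposing a tall matrix with Qr (an illustrative sketch, not
// part of the generated file; assumes the TensorFlow Go bindings). With the
// default full_matrices=false, a 3x2 input yields q of shape [3, 2] and r of
// shape [2, 2], since P = min(M, N) = 2.

package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	a := op.Const(s, [][]float32{{1, 2}, {3, 4}, {5, 6}})
	// Pass op.QrFullMatrices(true) for full-sized q and r instead.
	q, r := op.Qr(s, a)

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()

	out, err := sess.Run(nil, []tf.Output{q, r}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].Value()) // 3x2 orthonormal factor
	fmt.Println(out[1].Value()) // 2x2 upper-triangular factor
}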
-// Computes acos of x element-wise.
-func Acos(scope *Scope, x tf.Output) (y tf.Output) {
+// Records the bytes size of each element of `input_dataset` in a StatsAggregator.
+func BytesProducedStatsDataset(scope *Scope, input_dataset tf.Output, tag tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
opspec := tf.OpSpec{
- Type: "Acos",
+ Type: "BytesProducedStatsDataset",
Input: []tf.Input{
- x,
+ input_dataset, tag,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Writes a `Summary` protocol buffer with scalar values.
-//
-// The input `tag` and `value` must have the scalars.
-//
-// Arguments:
-// writer: A handle to a summary writer.
-// step: The step to write the summary for.
-// tag: Tag for the summary.
-// value: Value for the summary.
+// ResourceSparseApplyProximalGradientDescentAttr is an optional argument to ResourceSparseApplyProximalGradientDescent.
+type ResourceSparseApplyProximalGradientDescentAttr func(optionalAttr)
+
+// ResourceSparseApplyProximalGradientDescentUseLocking sets the optional use_locking attribute to value.
//
-// Returns the created operation.
-func WriteScalarSummary(scope *Scope, writer tf.Output, step tf.Output, tag tf.Output, value tf.Output) (o *tf.Operation) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "WriteScalarSummary",
- Input: []tf.Input{
- writer, step, tag, value,
- },
+// value: If True, the subtraction will be protected by a lock;
+// otherwise the behavior is undefined, but may exhibit less contention.
+// If not specified, defaults to false
+func ResourceSparseApplyProximalGradientDescentUseLocking(value bool) ResourceSparseApplyProximalGradientDescentAttr {
+ return func(m optionalAttr) {
+ m["use_locking"] = value
}
- return scope.AddOperation(opspec)
}
-// Transforms a tf.Example proto (as a string) into typed tensors.
+// Sparse update of '*var' as the FOBOS algorithm with a fixed learning rate.
+//
+// That is, for the rows for which we have grad, var is updated as follows:
+// prox_v = var - alpha * grad
+// var = sign(prox_v) / (1 + alpha * l2) * max{|prox_v| - alpha * l1, 0}
//
// Arguments:
-// serialized: A vector containing a batch of binary serialized Example protos.
-// dense_defaults: A list of Tensors (some may be empty), whose length matches
-// the length of `dense_keys`. dense_defaults[j] provides default values
-// when the example's feature_map lacks dense_key[j]. If an empty Tensor is
-// provided for dense_defaults[j], then the Feature dense_keys[j] is required.
-// The input type is inferred from dense_defaults[j], even when it's empty.
-// If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined,
-// then the shape of dense_defaults[j] must match that of dense_shapes[j].
-// If dense_shapes[j] has an undefined major dimension (variable strides dense
-// feature), dense_defaults[j] must contain a single element:
-// the padding element.
-// num_sparse: The number of sparse features to be parsed from the example. This
-// must match the lengths of `sparse_keys` and `sparse_types`.
-// sparse_keys: A list of `num_sparse` strings.
-// The keys expected in the Examples' features associated with sparse values.
-// dense_keys: The keys expected in the Examples' features associated with dense
-// values.
-// sparse_types: A list of `num_sparse` types; the data types of data in each
-// Feature given in sparse_keys.
-// Currently the ParseSingleExample op supports DT_FLOAT (FloatList),
-// DT_INT64 (Int64List), and DT_STRING (BytesList).
-// dense_shapes: The shapes of data in each Feature given in dense_keys.
-// The length of this list must match the length of `dense_keys`. The
-// number of elements in the Feature corresponding to dense_key[j] must
-// always equal dense_shapes[j].NumEntries(). If dense_shapes[j] ==
-// (D0, D1, ..., DN) then the shape of output Tensor dense_values[j]
-// will be (D0, D1, ..., DN): In the case dense_shapes[j] = (-1, D1,
-// ..., DN), the shape of the output Tensor dense_values[j] will be (M,
-// D1, .., DN), where M is the number of blocks of elements of length
-// D1 * .... * DN, in the input.
-func ParseSingleExample(scope *Scope, serialized tf.Output, dense_defaults []tf.Output, num_sparse int64, sparse_keys []string, dense_keys []string, sparse_types []tf.DataType, dense_shapes []tf.Shape) (sparse_indices []tf.Output, sparse_values []tf.Output, sparse_shapes []tf.Output, dense_values []tf.Output) {
+// var_: Should be from a Variable().
+// alpha: Scaling factor. Must be a scalar.
+// l1: L1 regularization. Must be a scalar.
+// l2: L2 regularization. Must be a scalar.
+// grad: The gradient.
+// indices: A vector of indices into the first dimension of var.
+//
+// Returns the created operation.
+func ResourceSparseApplyProximalGradientDescent(scope *Scope, var_ tf.Output, alpha tf.Output, l1 tf.Output, l2 tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyProximalGradientDescentAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"num_sparse": num_sparse, "sparse_keys": sparse_keys, "dense_keys": dense_keys, "sparse_types": sparse_types, "dense_shapes": dense_shapes}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "ParseSingleExample",
+ Type: "ResourceSparseApplyProximalGradientDescent",
Input: []tf.Input{
- serialized, tf.OutputList(dense_defaults),
+ var_, alpha, l1, l2, grad, indices,
},
Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- if scope.Err() != nil {
- return
- }
- var idx int
- var err error
- if sparse_indices, idx, err = makeOutputList(op, idx, "sparse_indices"); err != nil {
- scope.UpdateErr("ParseSingleExample", err)
- return
- }
- if sparse_values, idx, err = makeOutputList(op, idx, "sparse_values"); err != nil {
- scope.UpdateErr("ParseSingleExample", err)
- return
- }
- if sparse_shapes, idx, err = makeOutputList(op, idx, "sparse_shapes"); err != nil {
- scope.UpdateErr("ParseSingleExample", err)
- return
- }
- if dense_values, idx, err = makeOutputList(op, idx, "dense_values"); err != nil {
- scope.UpdateErr("ParseSingleExample", err)
- return
- }
- return sparse_indices, sparse_values, sparse_shapes, dense_values
+ return scope.AddOperation(opspec)
}
-// DecodeCompressedAttr is an optional argument to DecodeCompressed.
-type DecodeCompressedAttr func(optionalAttr)
+// MeanAttr is an optional argument to Mean.
+type MeanAttr func(optionalAttr)
-// DecodeCompressedCompressionType sets the optional compression_type attribute to value.
+// MeanKeepDims sets the optional keep_dims attribute to value.
//
-// value: A scalar containing either (i) the empty string (no
-// compression), (ii) "ZLIB", or (iii) "GZIP".
-// If not specified, defaults to ""
-func DecodeCompressedCompressionType(value string) DecodeCompressedAttr {
+// value: If true, retain reduced dimensions with length 1.
+// If not specified, defaults to false
+func MeanKeepDims(value bool) MeanAttr {
return func(m optionalAttr) {
- m["compression_type"] = value
+ m["keep_dims"] = value
}
}
-// Decompress strings.
-//
-// This op decompresses each element of the `bytes` input `Tensor`, which
-// is assumed to be compressed using the given `compression_type`.
+// Computes the mean of elements across dimensions of a tensor.
//
-// The `output` is a string `Tensor` of the same shape as `bytes`,
-// each element containing the decompressed data from the corresponding
-// element in `bytes`.
+// Reduces `input` along the dimensions given in `axis`. Unless
+// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+// `axis`. If `keep_dims` is true, the reduced dimensions are
+// retained with length 1.
//
// Arguments:
-// bytes: A Tensor of string which is compressed.
+// input: The tensor to reduce.
+// axis: The dimensions to reduce. Must be in the range
+// `[-rank(input), rank(input))`.
//
-// Returns A Tensor with the same shape as input `bytes`, uncompressed
-// from bytes.
-func DecodeCompressed(scope *Scope, bytes tf.Output, optional ...DecodeCompressedAttr) (output tf.Output) {
+// Returns The reduced tensor.
+func Mean(scope *Scope, input tf.Output, axis tf.Output, optional ...MeanAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
@@ -9951,9 +10198,9 @@ func DecodeCompressed(scope *Scope, bytes tf.Output, optional ...DecodeCompresse
a(attrs)
}
opspec := tf.OpSpec{
- Type: "DecodeCompressed",
+ Type: "Mean",
Input: []tf.Input{
- bytes,
+ input, axis,
},
Attrs: attrs,
}
@@ -9961,213 +10208,206 @@ func DecodeCompressed(scope *Scope, bytes tf.Output, optional ...DecodeCompresse
return op.Output(0)
}
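// Example: reducing a matrix with Mean (an illustrative sketch, not part of
// the generated file; assumes the TensorFlow Go bindings). Reducing over
// axis 0 collapses the rows; adding op.MeanKeepDims(true) would instead
// return a result of shape [1, 2].

package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	x := op.Const(s.SubScope("x"), [][]float32{{1, 2}, {3, 4}})
	axis := op.Const(s.SubScope("axis"), []int32{0})
	m := op.Mean(s, x, axis)

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()

	out, err := sess.Run(nil, []tf.Output{m}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].Value()) // [2 3]
}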
-// Copy a tensor setting everything outside a central band in each innermost matrix
-//
-// to zero.
-//
-// The `band` part is computed as follows:
-// Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
-// tensor with the same shape where
-//
-// `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.
-//
-// The indicator function
-//
-// `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower)) &&
-// (num_upper < 0 || (n-m) <= num_upper)`.
-//
-// For example:
+// InitializeTableFromTextFileV2Attr is an optional argument to InitializeTableFromTextFileV2.
+type InitializeTableFromTextFileV2Attr func(optionalAttr)
+
+// InitializeTableFromTextFileV2VocabSize sets the optional vocab_size attribute to value.
//
-// ```
-// # if 'input' is [[ 0, 1, 2, 3]
-// [-1, 0, 1, 2]
-// [-2, -1, 0, 1]
-// [-3, -2, -1, 0]],
+// value: Number of elements in the file; use -1 if unknown.
+// If not specified, defaults to -1
//
-// tf.matrix_band_part(input, 1, -1) ==> [[ 0, 1, 2, 3]
-// [-1, 0, 1, 2]
-// [ 0, -1, 0, 1]
-// [ 0, 0, -1, 0]],
+// REQUIRES: value >= -1
+func InitializeTableFromTextFileV2VocabSize(value int64) InitializeTableFromTextFileV2Attr {
+ return func(m optionalAttr) {
+ m["vocab_size"] = value
+ }
+}
+
+// InitializeTableFromTextFileV2Delimiter sets the optional delimiter attribute to value.
//
-// tf.matrix_band_part(input, 2, 1) ==> [[ 0, 1, 0, 0]
-// [-1, 0, 1, 0]
-// [-2, -1, 0, 1]
-// [ 0, -2, -1, 0]]
-// ```
+// value: Delimiter to separate fields in a line.
+// If not specified, defaults to "\t"
+func InitializeTableFromTextFileV2Delimiter(value string) InitializeTableFromTextFileV2Attr {
+ return func(m optionalAttr) {
+ m["delimiter"] = value
+ }
+}
+
+// Initializes a table from a text file.
//
-// Useful special cases:
+// It inserts one key-value pair into the table for each line of the file.
+// The key and value are extracted from the whole line content, from elements
+// of the line split on `delimiter`, or from the line number (starting from
+// zero). Where to extract the key and value from a line is specified by
+// `key_index` and `value_index`.
//
-// ```
-// tf.matrix_band_part(input, 0, -1) ==> Upper triangular part.
-// tf.matrix_band_part(input, -1, 0) ==> Lower triangular part.
-// tf.matrix_band_part(input, 0, 0) ==> Diagonal.
-// ```
+// - A value of -1 means use the line number (starting from zero); expects `int64`.
+// - A value of -2 means use the whole line content; expects `string`.
+// - A value >= 0 means use the index (starting at zero) of the split line based
+// on `delimiter`.
//
// Arguments:
-// input: Rank `k` tensor.
-// num_lower: 0-D tensor. Number of subdiagonals to keep. If negative, keep entire
-// lower triangle.
-// num_upper: 0-D tensor. Number of superdiagonals to keep. If negative, keep
-// entire upper triangle.
+// table_handle: Handle to a table which will be initialized.
+// filename: Filename of a vocabulary text file.
+// key_index: Column index in a line to get the table `key` values from.
+// value_index: Column index that represents information of a line to get the table
+// `value` values from.
//
-// Returns Rank `k` tensor of the same shape as input. The extracted banded tensor.
-func MatrixBandPart(scope *Scope, input tf.Output, num_lower tf.Output, num_upper tf.Output) (band tf.Output) {
+// Returns the created operation.
+func InitializeTableFromTextFileV2(scope *Scope, table_handle tf.Output, filename tf.Output, key_index int64, value_index int64, optional ...InitializeTableFromTextFileV2Attr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"key_index": key_index, "value_index": value_index}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "MatrixBandPart",
+ Type: "InitializeTableFromTextFileV2",
Input: []tf.Input{
- input, num_lower, num_upper,
+ table_handle, filename,
},
+ Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
-// DecodeRawAttr is an optional argument to DecodeRaw.
-type DecodeRawAttr func(optionalAttr)
+// QuantizedReluAttr is an optional argument to QuantizedRelu.
+type QuantizedReluAttr func(optionalAttr)
-// DecodeRawLittleEndian sets the optional little_endian attribute to value.
-//
-// value: Whether the input `bytes` are in little-endian order.
-// Ignored for `out_type` values that are stored in a single byte like
-// `uint8`.
-// If not specified, defaults to true
-func DecodeRawLittleEndian(value bool) DecodeRawAttr {
+// QuantizedReluOutType sets the optional out_type attribute to value.
+// If not specified, defaults to DT_QUINT8
+func QuantizedReluOutType(value tf.DataType) QuantizedReluAttr {
return func(m optionalAttr) {
- m["little_endian"] = value
+ m["out_type"] = value
}
}
-// Reinterpret the bytes of a string as a vector of numbers.
+// Computes Quantized Rectified Linear: `max(features, 0)`.
//
// Arguments:
-// bytes: All the elements must have the same length.
//
+// min_features: The float value that the lowest quantized value represents.
+// max_features: The float value that the highest quantized value represents.
//
-// Returns A Tensor with one more dimension than the input `bytes`. The
-// added dimension will have size equal to the length of the elements
-// of `bytes` divided by the number of bytes to represent `out_type`.
-func DecodeRaw(scope *Scope, bytes tf.Output, out_type tf.DataType, optional ...DecodeRawAttr) (output tf.Output) {
+// Returns Has the same output shape as "features". The float value that the
+// lowest quantized value represents. The float value that the highest
+// quantized value represents.
+func QuantizedRelu(scope *Scope, features tf.Output, min_features tf.Output, max_features tf.Output, optional ...QuantizedReluAttr) (activations tf.Output, min_activations tf.Output, max_activations tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"out_type": out_type}
+ attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "DecodeRaw",
+ Type: "QuantizedRelu",
Input: []tf.Input{
- bytes,
+ features, min_features, max_features,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1), op.Output(2)
}
-// OrderedMapIncompleteSizeAttr is an optional argument to OrderedMapIncompleteSize.
-type OrderedMapIncompleteSizeAttr func(optionalAttr)
-
-// OrderedMapIncompleteSizeCapacity sets the optional capacity attribute to value.
-// If not specified, defaults to 0
+// Reshapes a SparseTensor to represent values in a new dense shape.
//
-// REQUIRES: value >= 0
-func OrderedMapIncompleteSizeCapacity(value int64) OrderedMapIncompleteSizeAttr {
- return func(m optionalAttr) {
- m["capacity"] = value
- }
-}
-
-// OrderedMapIncompleteSizeMemoryLimit sets the optional memory_limit attribute to value.
-// If not specified, defaults to 0
+// This operation has the same semantics as reshape on the represented dense
+// tensor. The `input_indices` are recomputed based on the requested `new_shape`.
//
-// REQUIRES: value >= 0
-func OrderedMapIncompleteSizeMemoryLimit(value int64) OrderedMapIncompleteSizeAttr {
- return func(m optionalAttr) {
- m["memory_limit"] = value
- }
-}
-
-// OrderedMapIncompleteSizeContainer sets the optional container attribute to value.
-// If not specified, defaults to ""
-func OrderedMapIncompleteSizeContainer(value string) OrderedMapIncompleteSizeAttr {
- return func(m optionalAttr) {
- m["container"] = value
+// If one component of `new_shape` is the special value -1, the size of that
+// dimension is computed so that the total dense size remains constant. At
+// most one component of `new_shape` can be -1. The number of dense elements
+// implied by `new_shape` must be the same as the number of dense elements
+// originally implied by `input_shape`.
+//
+// Reshaping does not affect the order of values in the SparseTensor.
+//
+// If the input tensor has rank `R_in` and `N` non-empty values, and `new_shape`
+// has length `R_out`, then `input_indices` has shape `[N, R_in]`,
+// `input_shape` has length `R_in`, `output_indices` has shape `[N, R_out]`, and
+// `output_shape` has length `R_out`.
+//
+// Arguments:
+// input_indices: 2-D. `N x R_in` matrix with the indices of non-empty values in a
+// SparseTensor.
+// input_shape: 1-D. `R_in` vector with the input SparseTensor's dense shape.
+// new_shape: 1-D. `R_out` vector with the requested new dense shape.
+//
+// Returns 2-D. `N x R_out` matrix with the updated indices of non-empty
+// values in the output SparseTensor. 1-D. `R_out` vector with the full dense
+// shape of the output SparseTensor. This is the same as `new_shape` but with
+// any -1 dimensions filled in.
+func SparseReshape(scope *Scope, input_indices tf.Output, input_shape tf.Output, new_shape tf.Output) (output_indices tf.Output, output_shape tf.Output) {
+ if scope.Err() != nil {
+ return
}
-}
-
-// OrderedMapIncompleteSizeSharedName sets the optional shared_name attribute to value.
-// If not specified, defaults to ""
-func OrderedMapIncompleteSizeSharedName(value string) OrderedMapIncompleteSizeAttr {
- return func(m optionalAttr) {
- m["shared_name"] = value
+ opspec := tf.OpSpec{
+ Type: "SparseReshape",
+ Input: []tf.Input{
+ input_indices, input_shape, new_shape,
+ },
}
+ op := scope.AddOperation(opspec)
+ return op.Output(0), op.Output(1)
}
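// Example: reshaping a sparse tensor with SparseReshape (an illustrative
// sketch, not part of the generated file; assumes the TensorFlow Go
// bindings). A [2, 3] SparseTensor with non-empty values at (0, 0) and
// (1, 2) is reshaped to [3, -1]; the -1 resolves to 2, and the linear
// positions 0 and 5 map to the new indices (0, 0) and (2, 1).

package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	indices := op.Const(s.SubScope("indices"), [][]int64{{0, 0}, {1, 2}})
	shape := op.Const(s.SubScope("shape"), []int64{2, 3})
	newShape := op.Const(s.SubScope("new_shape"), []int64{3, -1})
	outIndices, outShape := op.SparseReshape(s, indices, shape, newShape)

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()

	out, err := sess.Run(nil, []tf.Output{outIndices, outShape}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].Value()) // [[0 0] [2 1]]
	fmt.Println(out[1].Value()) // [3 2]
}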
-// Op returns the number of incomplete elements in the underlying container.
-func OrderedMapIncompleteSize(scope *Scope, dtypes []tf.DataType, optional ...OrderedMapIncompleteSizeAttr) (size tf.Output) {
+// Deprecated. Use TensorArraySplitV3.
+func TensorArraySplitV2(scope *Scope, handle tf.Output, value tf.Output, lengths tf.Output, flow_in tf.Output) (flow_out tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dtypes": dtypes}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "OrderedMapIncompleteSize",
-
- Attrs: attrs,
+ Type: "TensorArraySplitV2",
+ Input: []tf.Input{
+ handle, value, lengths, flow_in,
+ },
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// RandomShuffleAttr is an optional argument to RandomShuffle.
-type RandomShuffleAttr func(optionalAttr)
+// PackAttr is an optional argument to Pack.
+type PackAttr func(optionalAttr)
-// RandomShuffleSeed sets the optional seed attribute to value.
+// PackAxis sets the optional axis attribute to value.
//
-// value: If either `seed` or `seed2` are set to be non-zero, the random number
-// generator is seeded by the given seed. Otherwise, it is seeded by a
-// random seed.
+// value: Dimension along which to pack. Negative values wrap around, so the
+// valid range is `[-(R+1), R+1)`.
// If not specified, defaults to 0
-func RandomShuffleSeed(value int64) RandomShuffleAttr {
+func PackAxis(value int64) PackAttr {
return func(m optionalAttr) {
- m["seed"] = value
+ m["axis"] = value
}
}
-// RandomShuffleSeed2 sets the optional seed2 attribute to value.
+// Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor.
//
-// value: A second seed to avoid seed collision.
-// If not specified, defaults to 0
-func RandomShuffleSeed2(value int64) RandomShuffleAttr {
- return func(m optionalAttr) {
- m["seed2"] = value
- }
-}
-
-// Randomly shuffles a tensor along its first dimension.
+// Packs the `N` tensors in `values` into a tensor with rank one higher than each
+// tensor in `values`, by packing them along the `axis` dimension.
+// Given a list of tensors of shape `(A, B, C)`;
//
-// The tensor is shuffled along dimension 0, such that each `value[j]` is mapped
-// to one and only one `output[i]`. For example, a mapping that might occur for a
-// 3x2 tensor is:
+// if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
+// if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
+// Etc.
+//
+// For example:
//
// ```
-// [[1, 2], [[5, 6],
-// [3, 4], ==> [1, 2],
-// [5, 6]] [3, 4]]
+// # 'x' is [1, 4]
+// # 'y' is [2, 5]
+// # 'z' is [3, 6]
+// pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim.
+// pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
// ```
//
+// This is the opposite of `unpack`.
+//
// Arguments:
-// value: The tensor to be shuffled.
+// values: Must be of same shape and type.
//
-// Returns A tensor of same shape and type as `value`, shuffled along its first
-// dimension.
-func RandomShuffle(scope *Scope, value tf.Output, optional ...RandomShuffleAttr) (output tf.Output) {
+// Returns The packed tensor.
+func Pack(scope *Scope, values []tf.Output, optional ...PackAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
@@ -10176,9 +10416,9 @@ func RandomShuffle(scope *Scope, value tf.Output, optional ...RandomShuffleAttr)
a(attrs)
}
opspec := tf.OpSpec{
- Type: "RandomShuffle",
+ Type: "Pack",
Input: []tf.Input{
- value,
+ tf.OutputList(values),
},
Attrs: attrs,
}
@@ -10186,39 +10426,86 @@ func RandomShuffle(scope *Scope, value tf.Output, optional ...RandomShuffleAttr)
return op.Output(0)
}
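// Example: stacking vectors with Pack (an illustrative sketch, not part of
// the generated file; assumes the TensorFlow Go bindings). This reproduces
// the axis=1 case from the documentation above.

package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	x := op.Const(s.SubScope("x"), []int32{1, 4})
	y := op.Const(s.SubScope("y"), []int32{2, 5})
	z := op.Const(s.SubScope("z"), []int32{3, 6})
	packed := op.Pack(s, []tf.Output{x, y, z}, op.PackAxis(1))

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()

	out, err := sess.Run(nil, []tf.Output{packed}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].Value()) // [[1 2 3] [4 5 6]]
}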
-// FakeQuantWithMinMaxVarsPerChannelAttr is an optional argument to FakeQuantWithMinMaxVarsPerChannel.
-type FakeQuantWithMinMaxVarsPerChannelAttr func(optionalAttr)
+// Reorders a SparseTensor into the canonical, row-major ordering.
+//
+// Note that by convention, all sparse ops preserve the canonical ordering along
+// increasing dimension number. The only time ordering can be violated is during
+// manual manipulation of the indices and values vectors to add entries.
+//
+// Reordering does not affect the shape of the SparseTensor.
+//
+// If the tensor has rank `R` and `N` non-empty values, `input_indices` has
+// shape `[N, R]`, input_values has length `N`, and input_shape has length `R`.
+//
+// Arguments:
+// input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a
+// SparseTensor, possibly not in canonical ordering.
+// input_values: 1-D. `N` non-empty values corresponding to `input_indices`.
+// input_shape: 1-D. Shape of the input SparseTensor.
+//
+// Returns 2-D. `N x R` matrix with the same indices as input_indices, but
+// in canonical row-major ordering. 1-D. `N` non-empty values corresponding
+// to `output_indices`.
+func SparseReorder(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output) (output_indices tf.Output, output_values tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "SparseReorder",
+ Input: []tf.Input{
+ input_indices, input_values, input_shape,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0), op.Output(1)
+}
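// Example: restoring canonical ordering with SparseReorder (an illustrative
// sketch, not part of the generated file; assumes the TensorFlow Go
// bindings). The input indices are deliberately out of row-major order, and
// the values are permuted along with the indices.

package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	indices := op.Const(s.SubScope("indices"), [][]int64{{1, 0}, {0, 2}})
	values := op.Const(s.SubScope("values"), []float32{10, 20})
	shape := op.Const(s.SubScope("shape"), []int64{2, 3})
	outIndices, outValues := op.SparseReorder(s, indices, values, shape)

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()

	out, err := sess.Run(nil, []tf.Output{outIndices, outValues}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].Value()) // [[0 2] [1 0]]
	fmt.Println(out[1].Value()) // [20 10]
}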
-// FakeQuantWithMinMaxVarsPerChannelNumBits sets the optional num_bits attribute to value.
-// If not specified, defaults to 8
-func FakeQuantWithMinMaxVarsPerChannelNumBits(value int64) FakeQuantWithMinMaxVarsPerChannelAttr {
- return func(m optionalAttr) {
- m["num_bits"] = value
+// Computes rectified linear: `max(features, 0)`.
+func Relu(scope *Scope, features tf.Output) (activations tf.Output) {
+ if scope.Err() != nil {
+ return
}
+ opspec := tf.OpSpec{
+ Type: "Relu",
+ Input: []tf.Input{
+ features,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// FakeQuantWithMinMaxVarsPerChannelNarrowRange sets the optional narrow_range attribute to value.
+// ResourceApplyAddSignAttr is an optional argument to ResourceApplyAddSign.
+type ResourceApplyAddSignAttr func(optionalAttr)
+
+// ResourceApplyAddSignUseLocking sets the optional use_locking attribute to value.
+//
+// value: If `True`, updating of the var and m tensors is
+// protected by a lock; otherwise the behavior is undefined, but may exhibit less
+// contention.
// If not specified, defaults to false
-func FakeQuantWithMinMaxVarsPerChannelNarrowRange(value bool) FakeQuantWithMinMaxVarsPerChannelAttr {
+func ResourceApplyAddSignUseLocking(value bool) ResourceApplyAddSignAttr {
return func(m optionalAttr) {
- m["narrow_range"] = value
+ m["use_locking"] = value
}
}
-// Fake-quantize the 'inputs' tensor of type float and one of the shapes: `[d]`,
+// Update '*var' according to the AddSign update.
//
-// `[b, d]` `[b, h, w, d]` via per-channel floats `min` and `max` of shape `[d]`
-// to 'outputs' tensor of same shape as `inputs`.
+// m_t <- beta1 * m_{t-1} + (1 - beta1) * g
+// update <- (alpha + sign_decay * sign(g) * sign(m)) * g
+// variable <- variable - lr_t * update
//
-// `[min; max]` define the clamping range for the `inputs` data.
-// `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`
-// when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and
-// then de-quantized and output as floats in `[min; max]` interval.
-// `num_bits` is the bitwidth of the quantization; between 2 and 8, inclusive.
+// Arguments:
+// var_: Should be from a Variable().
+// m: Should be from a Variable().
+// lr: Scaling factor. Must be a scalar.
+// alpha: Must be a scalar.
+// sign_decay: Must be a scalar.
+// beta: Must be a scalar.
+// grad: The gradient.
//
-// This operation has a gradient and thus allows for training `min` and `max`
-// values.
-func FakeQuantWithMinMaxVarsPerChannel(scope *Scope, inputs tf.Output, min tf.Output, max tf.Output, optional ...FakeQuantWithMinMaxVarsPerChannelAttr) (outputs tf.Output) {
+// Returns the created operation.
+func ResourceApplyAddSign(scope *Scope, var_ tf.Output, m tf.Output, lr tf.Output, alpha tf.Output, sign_decay tf.Output, beta tf.Output, grad tf.Output, optional ...ResourceApplyAddSignAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
@@ -10227,65 +10514,61 @@ func FakeQuantWithMinMaxVarsPerChannel(scope *Scope, inputs tf.Output, min tf.Ou
a(attrs)
}
opspec := tf.OpSpec{
- Type: "FakeQuantWithMinMaxVarsPerChannel",
+ Type: "ResourceApplyAddSign",
Input: []tf.Input{
- inputs, min, max,
+ var_, m, lr, alpha, sign_decay, beta, grad,
},
Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
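// Example: one AddSign step on a resource variable (an illustrative sketch,
// not part of the generated file). It assumes the VarHandleOp,
// AssignVariableOp and ReadVariableOp wrappers from this same package;
// resource ops return a *tf.Operation that must be run as a target.

package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	v := op.VarHandleOp(s.SubScope("var"), tf.Float, tf.MakeShape(2))
	m := op.VarHandleOp(s.SubScope("m"), tf.Float, tf.MakeShape(2))
	initV := op.AssignVariableOp(s.SubScope("init_var"), v, op.Const(s.SubScope("v0"), []float32{1, 1}))
	initM := op.AssignVariableOp(s.SubScope("init_m"), m, op.Const(s.SubScope("m0"), []float32{0, 0}))

	lr := op.Const(s.SubScope("lr"), float32(0.1))
	alpha := op.Const(s.SubScope("alpha"), float32(1))
	signDecay := op.Const(s.SubScope("sign_decay"), float32(1))
	beta := op.Const(s.SubScope("beta"), float32(0.9))
	grad := op.Const(s.SubScope("grad"), []float32{0.5, -0.5})
	step := op.ResourceApplyAddSign(s, v, m, lr, alpha, signDecay, beta, grad)
	read := op.ReadVariableOp(s.SubScope("read"), v, tf.Float)

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()

	// Initialize the variables, take one optimizer step, then read var back.
	if _, err := sess.Run(nil, nil, []*tf.Operation{initV, initM}); err != nil {
		panic(err)
	}
	if _, err := sess.Run(nil, nil, []*tf.Operation{step}); err != nil {
		panic(err)
	}
	out, err := sess.Run(nil, []tf.Output{read}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].Value()) // approximately [0.9 1.1]
}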
-// TruncatedNormalAttr is an optional argument to TruncatedNormal.
-type TruncatedNormalAttr func(optionalAttr)
+// FractionalMaxPoolGradAttr is an optional argument to FractionalMaxPoolGrad.
+type FractionalMaxPoolGradAttr func(optionalAttr)
-// TruncatedNormalSeed sets the optional seed attribute to value.
+// FractionalMaxPoolGradOverlapping sets the optional overlapping attribute to value.
//
-// value: If either `seed` or `seed2` are set to be non-zero, the random number
-// generator is seeded by the given seed. Otherwise, it is seeded by a
-// random seed.
-// If not specified, defaults to 0
-func TruncatedNormalSeed(value int64) TruncatedNormalAttr {
- return func(m optionalAttr) {
- m["seed"] = value
- }
-}
-
-// TruncatedNormalSeed2 sets the optional seed2 attribute to value.
+// value: When set to True, it means when pooling, the values at the boundary
+// of adjacent pooling cells are used by both cells. For example:
//
-// value: A second seed to avoid seed collision.
-// If not specified, defaults to 0
-func TruncatedNormalSeed2(value int64) TruncatedNormalAttr {
+// `index 0 1 2 3 4`
+//
+// `value 20 5 16 3 7`
+//
+// If the pooling sequence is [0, 2, 4], then 16, at index 2, will be used twice.
+// The result would be [20, 16] for fractional max pooling.
+// If not specified, defaults to false
+func FractionalMaxPoolGradOverlapping(value bool) FractionalMaxPoolGradAttr {
return func(m optionalAttr) {
- m["seed2"] = value
+ m["overlapping"] = value
}
}
-// Outputs random values from a truncated normal distribution.
-//
-// The generated values follow a normal distribution with mean 0 and standard
-// deviation 1, except that values whose magnitude is more than 2 standard
-// deviations from the mean are dropped and re-picked.
+// Computes gradient of the FractionalMaxPool function.
//
// Arguments:
-// shape: The shape of the output tensor.
-// dtype: The type of the output.
+// orig_input: Original input for `fractional_max_pool`
+// orig_output: Original output for `fractional_max_pool`
+// out_backprop: 4-D with shape `[batch, height, width, channels]`. Gradients
+// w.r.t. the output of `fractional_max_pool`.
+// row_pooling_sequence: row pooling sequence, which forms a pooling region
+// together with col_pooling_sequence.
+// col_pooling_sequence: column pooling sequence, which forms a pooling region
+// together with row_pooling_sequence.
//
-// Returns A tensor of the specified shape filled with random truncated normal
-// values.
-func TruncatedNormal(scope *Scope, shape tf.Output, dtype tf.DataType, optional ...TruncatedNormalAttr) (output tf.Output) {
+// Returns 4-D. Gradients w.r.t. the input of `fractional_max_pool`.
+func FractionalMaxPoolGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, out_backprop tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output, optional ...FractionalMaxPoolGradAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dtype": dtype}
+ attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "TruncatedNormal",
+ Type: "FractionalMaxPoolGrad",
Input: []tf.Input{
- shape,
+ orig_input, orig_output, out_backprop, row_pooling_sequence, col_pooling_sequence,
},
Attrs: attrs,
}
@@ -10293,44 +10576,34 @@ func TruncatedNormal(scope *Scope, shape tf.Output, dtype tf.DataType, optional
return op.Output(0)
}
-// ResourceApplyFtrlV2Attr is an optional argument to ResourceApplyFtrlV2.
-type ResourceApplyFtrlV2Attr func(optionalAttr)
+// ResourceApplyAdagradDAAttr is an optional argument to ResourceApplyAdagradDA.
+type ResourceApplyAdagradDAAttr func(optionalAttr)
-// ResourceApplyFtrlV2UseLocking sets the optional use_locking attribute to value.
+// ResourceApplyAdagradDAUseLocking sets the optional use_locking attribute to value.
//
-// value: If `True`, updating of the var and accum tensors will be protected
-// by a lock; otherwise the behavior is undefined, but may exhibit less
-// contention.
+// value: If True, updating of the var and accum tensors will be protected by
+// a lock; otherwise the behavior is undefined, but may exhibit less contention.
// If not specified, defaults to false
-func ResourceApplyFtrlV2UseLocking(value bool) ResourceApplyFtrlV2Attr {
+func ResourceApplyAdagradDAUseLocking(value bool) ResourceApplyAdagradDAAttr {
return func(m optionalAttr) {
m["use_locking"] = value
}
}
-// Update '*var' according to the Ftrl-proximal scheme.
-//
-// grad_with_shrinkage = grad + 2 * l2_shrinkage * var
-// accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
-// linear += grad_with_shrinkage +
-// (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
-// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
-// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
-// accum = accum_new
+// Update '*var' according to the proximal adagrad scheme.
//
// Arguments:
// var_: Should be from a Variable().
-// accum: Should be from a Variable().
-// linear: Should be from a Variable().
+// gradient_accumulator: Should be from a Variable().
+// gradient_squared_accumulator: Should be from a Variable().
// grad: The gradient.
// lr: Scaling factor. Must be a scalar.
-// l1: L1 regulariation. Must be a scalar.
-// l2: L2 shrinkage regulariation. Must be a scalar.
-//
-// lr_power: Scaling factor. Must be a scalar.
+// l1: L1 regularization. Must be a scalar.
+// l2: L2 regularization. Must be a scalar.
+// global_step: Training step number. Must be a scalar.
//
// Returns the created operation.
-func ResourceApplyFtrlV2(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, l2_shrinkage tf.Output, lr_power tf.Output, optional ...ResourceApplyFtrlV2Attr) (o *tf.Operation) {
+func ResourceApplyAdagradDA(scope *Scope, var_ tf.Output, gradient_accumulator tf.Output, gradient_squared_accumulator tf.Output, grad tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, global_step tf.Output, optional ...ResourceApplyAdagradDAAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
@@ -10339,128 +10612,85 @@ func ResourceApplyFtrlV2(scope *Scope, var_ tf.Output, accum tf.Output, linear t
a(attrs)
}
opspec := tf.OpSpec{
- Type: "ResourceApplyFtrlV2",
+ Type: "ResourceApplyAdagradDA",
Input: []tf.Input{
- var_, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power,
+ var_, gradient_accumulator, gradient_squared_accumulator, grad, lr, l1, l2, global_step,
},
Attrs: attrs,
}
return scope.AddOperation(opspec)
}
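For illustration only (not in the diff): a sketch of wiring `ResourceApplyAdagradDA` into a graph, assuming `s` is a `Scope` and `v`, `gAccum`, `gSqAccum` are hypothetical resource handles created with `op.VarHandleOp` and initialized elsewhere via `op.AssignVariableOp`:

```go
// All numeric inputs below are scalar float32 except global_step (int64).
update := op.ResourceApplyAdagradDA(s,
	v, gAccum, gSqAccum,
	op.Const(s, float32(0.5)),  // grad
	op.Const(s, float32(0.01)), // lr
	op.Const(s, float32(0.0)),  // l1
	op.Const(s, float32(0.0)),  // l2
	op.Const(s, int64(1)),      // global_step
	op.ResourceApplyAdagradDAUseLocking(true),
)
// update is a *tf.Operation; pass it as a target to Session.Run each step.
```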
-// SkipgramAttr is an optional argument to Skipgram.
-type SkipgramAttr func(optionalAttr)
+// SparseReduceMaxSparseAttr is an optional argument to SparseReduceMaxSparse.
+type SparseReduceMaxSparseAttr func(optionalAttr)
-// SkipgramWindowSize sets the optional window_size attribute to value.
+// SparseReduceMaxSparseKeepDims sets the optional keep_dims attribute to value.
//
-// value: The number of words to predict to the left and right of the target.
-// If not specified, defaults to 5
-func SkipgramWindowSize(value int64) SkipgramAttr {
+// value: If true, retain reduced dimensions with length 1.
+// If not specified, defaults to false
+func SparseReduceMaxSparseKeepDims(value bool) SparseReduceMaxSparseAttr {
return func(m optionalAttr) {
- m["window_size"] = value
+ m["keep_dims"] = value
}
}
-// SkipgramMinCount sets the optional min_count attribute to value.
+// Computes the max of elements across dimensions of a SparseTensor.
//
-// value: The minimum number of word occurrences for it to be included in the
-// vocabulary.
-// If not specified, defaults to 5
-func SkipgramMinCount(value int64) SkipgramAttr {
- return func(m optionalAttr) {
- m["min_count"] = value
- }
-}
-
-// SkipgramSubsample sets the optional subsample attribute to value.
+// This Op takes a SparseTensor and is the sparse counterpart to
+// `tf.reduce_max()`. In contrast to SparseReduceMax, this Op returns a
+// SparseTensor.
//
-// value: Threshold for word occurrence. Words that appear with higher
-// frequency will be randomly down-sampled. Set to 0 to disable.
-// If not specified, defaults to 0.001
-func SkipgramSubsample(value float32) SkipgramAttr {
- return func(m optionalAttr) {
- m["subsample"] = value
- }
-}
-
-// Parses a text file and creates a batch of examples.
+// Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
+// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+// `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
+// with length 1.
//
-// DEPRECATED at GraphDef version 19: Moving word2vec into tensorflow_models/tutorials and deprecating its ops here as a result
+// If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
+// with a single element is returned. Additionally, the axes can be negative,
+// in which case they are interpreted according to the indexing rules in Python.
//
// Arguments:
-// filename: The corpus's text file name.
-// batch_size: The size of produced batch.
-//
-// Returns A vector of words in the corpus.Frequencies of words. Sorted in the non-ascending order.Number of words per epoch in the data file.The current epoch number.The total number of words processed so far.A vector of word ids.A vector of word ids.
-func Skipgram(scope *Scope, filename string, batch_size int64, optional ...SkipgramAttr) (vocab_word tf.Output, vocab_freq tf.Output, words_per_epoch tf.Output, current_epoch tf.Output, total_words_processed tf.Output, examples tf.Output, labels tf.Output) {
+// input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a
+// SparseTensor, possibly not in canonical ordering.
+// input_values: 1-D. `N` non-empty values corresponding to `input_indices`.
+// input_shape: 1-D. Shape of the input SparseTensor.
+// reduction_axes: 1-D. Length-`K` vector containing the reduction axes.
+func SparseReduceMaxSparse(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output, reduction_axes tf.Output, optional ...SparseReduceMaxSparseAttr) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"filename": filename, "batch_size": batch_size}
+ attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "Skipgram",
-
+ Type: "SparseReduceMaxSparse",
+ Input: []tf.Input{
+ input_indices, input_values, input_shape, reduction_axes,
+ },
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5), op.Output(6)
-}
-
-// ParameterizedTruncatedNormalAttr is an optional argument to ParameterizedTruncatedNormal.
-type ParameterizedTruncatedNormalAttr func(optionalAttr)
-
-// ParameterizedTruncatedNormalSeed sets the optional seed attribute to value.
-//
-// value: If either `seed` or `seed2` are set to be non-zero, the random number
-// generator is seeded by the given seed. Otherwise, it is seeded by a
-// random seed.
-// If not specified, defaults to 0
-func ParameterizedTruncatedNormalSeed(value int64) ParameterizedTruncatedNormalAttr {
- return func(m optionalAttr) {
- m["seed"] = value
- }
+ return op.Output(0), op.Output(1), op.Output(2)
}
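A small sketch (not part of the diff) of reducing a 2x2 `SparseTensor` along its columns, assuming a `Scope` `s`; note `reduction_axes` is `int32`:

```go
// Sparse [[1, 0], [0, 3]]: nonzeros at [0,0]=1 and [1,1]=3.
idx := op.Const(s, [][]int64{{0, 0}, {1, 1}})
vals := op.Const(s, []float32{1, 3})
shape := op.Const(s, []int64{2, 2})
axes := op.Const(s, []int32{1}) // reduce across columns
outIdx, outVals, outShape := op.SparseReduceMaxSparse(s, idx, vals, shape, axes,
	op.SparseReduceMaxSparseKeepDims(true), // keep rank 2 with a length-1 dim
)
_, _, _ = outIdx, outVals, outShape
```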
-// ParameterizedTruncatedNormalSeed2 sets the optional seed2 attribute to value.
+// Creates a dataset that emits the outputs of `input_dataset` `count` times.
//
-// value: A second seed to avoid seed collision.
-// If not specified, defaults to 0
-func ParameterizedTruncatedNormalSeed2(value int64) ParameterizedTruncatedNormalAttr {
- return func(m optionalAttr) {
- m["seed2"] = value
- }
-}
-
-// Outputs random values from a normal distribution. The parameters may each be a
+// Arguments:
//
-// scalar which applies to the entire output, or a vector of length shape[0] which
-// stores the parameters for each batch.
+// count: A scalar representing the number of times that `input_dataset` should
+// be repeated. A value of `-1` indicates that it should be repeated infinitely.
//
-// Arguments:
-// shape: The shape of the output tensor. Batches are indexed by the 0th dimension.
-// means: The mean parameter of each batch.
-// stdevs: The standard deviation parameter of each batch. Must be greater than 0.
-// minvals: The minimum cutoff. May be -infinity.
-// maxvals: The maximum cutoff. May be +infinity, and must be more than the minval
-// for each batch.
//
-// Returns A matrix of shape num_batches x samples_per_batch, filled with random
-// truncated normal values using the parameters for each row.
-func ParameterizedTruncatedNormal(scope *Scope, shape tf.Output, means tf.Output, stdevs tf.Output, minvals tf.Output, maxvals tf.Output, optional ...ParameterizedTruncatedNormalAttr) (output tf.Output) {
+func RepeatDataset(scope *Scope, input_dataset tf.Output, count tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
+ attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
opspec := tf.OpSpec{
- Type: "ParameterizedTruncatedNormal",
+ Type: "RepeatDataset",
Input: []tf.Input{
- shape, means, stdevs, minvals, maxvals,
+ input_dataset, count,
},
Attrs: attrs,
}
@@ -10468,48 +10698,65 @@ func ParameterizedTruncatedNormal(scope *Scope, shape tf.Output, means tf.Output
return op.Output(0)
}
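For context, a hedged sketch (not in the diff) of feeding `RepeatDataset` from `TensorSliceDataset`, assuming a `Scope` `s`; iterator creation and `IteratorGetNext` plumbing are assumed to happen elsewhere:

```go
// A dataset of three scalar int64 elements, repeated indefinitely (count = -1).
slices := op.TensorSliceDataset(s,
	[]tf.Output{op.Const(s, []int64{1, 2, 3})},
	[]tf.Shape{tf.ScalarShape()},
)
repeated := op.RepeatDataset(s, slices, op.Const(s, int64(-1)),
	[]tf.DataType{tf.Int64}, []tf.Shape{tf.ScalarShape()})
_ = repeated // variant handle consumed by iterator ops
```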
-// RandomUniformIntAttr is an optional argument to RandomUniformInt.
-type RandomUniformIntAttr func(optionalAttr)
+// AddManySparseToTensorsMapAttr is an optional argument to AddManySparseToTensorsMap.
+type AddManySparseToTensorsMapAttr func(optionalAttr)
-// RandomUniformIntSeed sets the optional seed attribute to value.
+// AddManySparseToTensorsMapContainer sets the optional container attribute to value.
//
-// value: If either `seed` or `seed2` are set to be non-zero, the random number
-// generator is seeded by the given seed. Otherwise, it is seeded by a
-// random seed.
-// If not specified, defaults to 0
-func RandomUniformIntSeed(value int64) RandomUniformIntAttr {
+// value: The container name for the `SparseTensorsMap` created by this op.
+// If not specified, defaults to ""
+func AddManySparseToTensorsMapContainer(value string) AddManySparseToTensorsMapAttr {
return func(m optionalAttr) {
- m["seed"] = value
+ m["container"] = value
}
}
-// RandomUniformIntSeed2 sets the optional seed2 attribute to value.
+// AddManySparseToTensorsMapSharedName sets the optional shared_name attribute to value.
//
-// value: A second seed to avoid seed collision.
-// If not specified, defaults to 0
-func RandomUniformIntSeed2(value int64) RandomUniformIntAttr {
+// value: The shared name for the `SparseTensorsMap` created by this op.
+// If blank, the new Operation's unique name is used.
+// If not specified, defaults to ""
+func AddManySparseToTensorsMapSharedName(value string) AddManySparseToTensorsMapAttr {
return func(m optionalAttr) {
- m["seed2"] = value
+ m["shared_name"] = value
}
}
-// Outputs random integers from a uniform distribution.
+// Add an `N`-minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles.
//
-// The generated values are uniform integers in the range `[minval, maxval)`.
-// The lower bound `minval` is included in the range, while the upper bound
-// `maxval` is excluded.
+// A `SparseTensor` of rank `R` is represented by three tensors: `sparse_indices`,
+// `sparse_values`, and `sparse_shape`, where
//
-// The random integers are slightly biased unless `maxval - minval` is an exact
-// power of two. The bias is small for values of `maxval - minval` significantly
-// smaller than the range of the output (either `2^32` or `2^64`).
+// ```sparse_indices.shape[1] == sparse_shape.shape[0] == R```
+//
+// An `N`-minibatch of `SparseTensor` objects is represented as a `SparseTensor`
+// whose first `sparse_indices` column takes values in `[0, N)`, where
+// the minibatch size `N == sparse_shape[0]`.
+//
+// The input `SparseTensor` must have rank `R` greater than 1, and the first
+// dimension is treated as the minibatch dimension. Elements of the `SparseTensor`
+// must be sorted in increasing order of this first dimension. The stored
+// `SparseTensor` objects pointed to by each row of the output `sparse_handles`
+// will have rank `R-1`.
+//
+// The `SparseTensor` values can then be read out as part of a minibatch by passing
+// the given keys as vector elements to `TakeManySparseFromTensorsMap`. To ensure
+// the correct `SparseTensorsMap` is accessed, ensure that the same
+// `container` and `shared_name` are passed to that Op. If no `shared_name`
+// is provided here, instead use the *name* of the Operation created by calling
+// `AddManySparseToTensorsMap` as the `shared_name` passed to
+// `TakeManySparseFromTensorsMap`. Ensure the Operations are colocated.
//
// Arguments:
-// shape: The shape of the output tensor.
-// minval: 0-D. Inclusive lower bound on the generated integers.
-// maxval: 0-D. Exclusive upper bound on the generated integers.
+// sparse_indices: 2-D. The `indices` of the minibatch `SparseTensor`.
+// `sparse_indices[:, 0]` must be ordered values in `[0, N)`.
+// sparse_values: 1-D. The `values` of the minibatch `SparseTensor`.
+// sparse_shape: 1-D. The `shape` of the minibatch `SparseTensor`.
+// The minibatch size `N == sparse_shape[0]`.
//
-// Returns A tensor of the specified shape filled with uniform random integers.
-func RandomUniformInt(scope *Scope, shape tf.Output, minval tf.Output, maxval tf.Output, optional ...RandomUniformIntAttr) (output tf.Output) {
+// Returns 1-D. The handles of the `SparseTensor` now stored in the
+// `SparseTensorsMap`. Shape: `[N]`.
+func AddManySparseToTensorsMap(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output, optional ...AddManySparseToTensorsMapAttr) (sparse_handles tf.Output) {
if scope.Err() != nil {
return
}
@@ -10518,9 +10765,9 @@ func RandomUniformInt(scope *Scope, shape tf.Output, minval tf.Output, maxval tf
a(attrs)
}
opspec := tf.OpSpec{
- Type: "RandomUniformInt",
+ Type: "AddManySparseToTensorsMap",
Input: []tf.Input{
- shape, minval, maxval,
+ sparse_indices, sparse_values, sparse_shape,
},
Attrs: attrs,
}
@@ -10528,118 +10775,121 @@ func RandomUniformInt(scope *Scope, shape tf.Output, minval tf.Output, maxval tf
return op.Output(0)
}
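A sketch (not in the diff) of the `shared_name` handshake described above, assuming hypothetical minibatch components `spIdx`, `spVals`, `spShape` already in the graph under a `Scope` `s`:

```go
// Store an N-minibatch SparseTensor; each row becomes a rank R-1 entry.
handles := op.AddManySparseToTensorsMap(s, spIdx, spVals, spShape,
	op.AddManySparseToTensorsMapSharedName("minibatch_map"), // hypothetical name
)
// To read rows back, pass the same shared_name ("minibatch_map") to
// TakeManySparseFromTensorsMap, and keep the two ops colocated.
_ = handles // 1-D, shape [N]
```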
-// Delete the TensorArray from its resource container.
+// MinAttr is an optional argument to Min.
+type MinAttr func(optionalAttr)
+
+// MinKeepDims sets the optional keep_dims attribute to value.
//
-// This enables the user to close and release the resource in the middle
-// of a step/run.
+// value: If true, retain reduced dimensions with length 1.
+// If not specified, defaults to false
+func MinKeepDims(value bool) MinAttr {
+ return func(m optionalAttr) {
+ m["keep_dims"] = value
+ }
+}
+
+// Computes the minimum of elements across dimensions of a tensor.
+//
+// Reduces `input` along the dimensions given in `axis`. Unless
+// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+// `axis`. If `keep_dims` is true, the reduced dimensions are
+// retained with length 1.
//
// Arguments:
-// handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
+// input: The tensor to reduce.
+// axis: The dimensions to reduce. Must be in the range
+// `[-rank(input), rank(input))`.
//
-// Returns the created operation.
-func TensorArrayCloseV3(scope *Scope, handle tf.Output) (o *tf.Operation) {
+// Returns The reduced tensor.
+func Min(scope *Scope, input tf.Output, axis tf.Output, optional ...MinAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "TensorArrayCloseV3",
+ Type: "Min",
Input: []tf.Input{
- handle,
+ input, axis,
},
+ Attrs: attrs,
}
- return scope.AddOperation(opspec)
-}
-
-// ResourceGatherAttr is an optional argument to ResourceGather.
-type ResourceGatherAttr func(optionalAttr)
-
-// ResourceGatherValidateIndices sets the optional validate_indices attribute to value.
-// If not specified, defaults to true
-func ResourceGatherValidateIndices(value bool) ResourceGatherAttr {
- return func(m optionalAttr) {
- m["validate_indices"] = value
- }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
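A minimal sketch (not in the diff), assuming a `Scope` `s`:

```go
x := op.Const(s, [][]float32{{3, 1}, {4, 1}})
axis := op.Const(s, []int32{1})               // reduce across columns
m := op.Min(s, x, axis, op.MinKeepDims(true)) // shape [2, 1]: [[1], [1]]
_ = m
```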
-// Gather slices from the variable pointed to by `resource` according to `indices`.
-//
-// `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
-// Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
-//
-// ```python
-// # Scalar indices
-// output[:, ..., :] = params[indices, :, ... :]
-//
-// # Vector indices
-// output[i, :, ..., :] = params[indices[i], :, ... :]
+// Shuffle dimensions of x according to a permutation.
//
-// # Higher rank indices
-// output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
-// ```
-func ResourceGather(scope *Scope, resource tf.Output, indices tf.Output, dtype tf.DataType, optional ...ResourceGatherAttr) (output tf.Output) {
+// The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
+// `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
+func Transpose(scope *Scope, x tf.Output, perm tf.Output) (y tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dtype": dtype}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "ResourceGather",
+ Type: "Transpose",
Input: []tf.Input{
- resource, indices,
+ x, perm,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
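A complete, self-contained usage sketch (not in the diff) that builds and runs a tiny graph around `Transpose`:

```go
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	x := op.Const(s, [][]int32{{1, 2, 3}, {4, 5, 6}})
	y := op.Transpose(s, x, op.Const(s, []int32{1, 0})) // swap the two axes
	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()
	out, err := sess.Run(nil, []tf.Output{y}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].Value()) // [[1 4] [2 5] [3 6]]
}
```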
-// QuantizedConv2DAttr is an optional argument to QuantizedConv2D.
-type QuantizedConv2DAttr func(optionalAttr)
+// DepthwiseConv2dNativeBackpropFilterAttr is an optional argument to DepthwiseConv2dNativeBackpropFilter.
+type DepthwiseConv2dNativeBackpropFilterAttr func(optionalAttr)
-// QuantizedConv2DOutType sets the optional out_type attribute to value.
-// If not specified, defaults to DT_QINT32
-func QuantizedConv2DOutType(value tf.DataType) QuantizedConv2DAttr {
+// DepthwiseConv2dNativeBackpropFilterDataFormat sets the optional data_format attribute to value.
+//
+// value: Specify the data format of the input and output data. With the
+// default format "NHWC", the data is stored in the order of:
+// [batch, height, width, channels].
+// Alternatively, the format could be "NCHW", the data storage order of:
+// [batch, channels, height, width].
+// If not specified, defaults to "NHWC"
+func DepthwiseConv2dNativeBackpropFilterDataFormat(value string) DepthwiseConv2dNativeBackpropFilterAttr {
return func(m optionalAttr) {
- m["out_type"] = value
+ m["data_format"] = value
}
}
-// QuantizedConv2DDilations sets the optional dilations attribute to value.
+// DepthwiseConv2dNativeBackpropFilterDilations sets the optional dilations attribute to value.
//
// value: 1-D tensor of length 4. The dilation factor for each dimension of
-// `input`. If set to k > 1, there will be k-1 skipped cells between each
-// filter element on that dimension. The dimension order is determined by the
-// value of `data_format`, see above for details. Dilations in the batch and
-// depth dimensions must be 1.
+// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
+// element on that dimension. The dimension order is determined by the value of
+// `data_format`, see above for details. Dilations in the batch and depth
+// dimensions must be 1.
// If not specified, defaults to <i:1 i:1 i:1 i:1 >
-func QuantizedConv2DDilations(value []int64) QuantizedConv2DAttr {
+func DepthwiseConv2dNativeBackpropFilterDilations(value []int64) DepthwiseConv2dNativeBackpropFilterAttr {
return func(m optionalAttr) {
m["dilations"] = value
}
}
-// Computes a 2D convolution given quantized 4D input and filter tensors.
-//
-// The inputs are quantized tensors where the lowest value represents the real
-// number of the associated minimum, and the highest represents the maximum.
-// This means that you can only interpret the quantized output in the same way, by
-// taking the returned minimum and maximum values into account.
+// Computes the gradients of depthwise convolution with respect to the filter.
//
// Arguments:
-//
-// filter: filter's input_depth dimension must match input's depth dimensions.
-// min_input: The float value that the lowest quantized input value represents.
-// max_input: The float value that the highest quantized input value represents.
-// min_filter: The float value that the lowest quantized filter value represents.
-// max_filter: The float value that the highest quantized filter value represents.
+// input: 4-D with shape based on `data_format`. For example, if
+// `data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height,
+// in_width, in_channels]` tensor.
+// filter_sizes: An integer vector representing the tensor shape of `filter`,
+// where `filter` is a 4-D
+// `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor.
+// out_backprop: 4-D with shape based on `data_format`.
+// For example, if `data_format` is 'NHWC' then
+// out_backprop shape is `[batch, out_height, out_width, out_channels]`.
+// Gradients w.r.t. the output of the convolution.
// strides: The stride of the sliding window for each dimension of the input
-// tensor.
+// of the convolution.
// padding: The type of padding algorithm to use.
//
-// Returns The float value that the lowest quantized output value represents.The float value that the highest quantized output value represents.
-func QuantizedConv2D(scope *Scope, input tf.Output, filter tf.Output, min_input tf.Output, max_input tf.Output, min_filter tf.Output, max_filter tf.Output, strides []int64, padding string, optional ...QuantizedConv2DAttr) (output tf.Output, min_output tf.Output, max_output tf.Output) {
+// Returns 4-D with shape
+// `[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t.
+// the `filter` input of the convolution.
+func DepthwiseConv2dNativeBackpropFilter(scope *Scope, input tf.Output, filter_sizes tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...DepthwiseConv2dNativeBackpropFilterAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
@@ -10648,182 +10898,86 @@ func QuantizedConv2D(scope *Scope, input tf.Output, filter tf.Output, min_input
a(attrs)
}
opspec := tf.OpSpec{
- Type: "QuantizedConv2D",
+ Type: "DepthwiseConv2dNativeBackpropFilter",
Input: []tf.Input{
- input, filter, min_input, max_input, min_filter, max_filter,
+ input, filter_sizes, out_backprop,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
-}
-
-// QueueDequeueV2Attr is an optional argument to QueueDequeueV2.
-type QueueDequeueV2Attr func(optionalAttr)
-
-// QueueDequeueV2TimeoutMs sets the optional timeout_ms attribute to value.
-//
-// value: If the queue is empty, this operation will block for up to
-// timeout_ms milliseconds.
-// Note: This option is not supported yet.
-// If not specified, defaults to -1
-func QueueDequeueV2TimeoutMs(value int64) QueueDequeueV2Attr {
- return func(m optionalAttr) {
- m["timeout_ms"] = value
- }
+ return op.Output(0)
}
-// Dequeues a tuple of one or more tensors from the given queue.
-//
-// This operation has k outputs, where k is the number of components
-// in the tuples stored in the given queue, and output i is the ith
-// component of the dequeued tuple.
+// Component-wise divides a SparseTensor by a dense Tensor.
//
-// N.B. If the queue is empty, this operation will block until an element
-// has been dequeued (or 'timeout_ms' elapses, if specified).
+// *Limitation*: this Op only broadcasts the dense side to the sparse side, but not
+// the other direction.
//
// Arguments:
-// handle: The handle to a queue.
-// component_types: The type of each component in a tuple.
+// sp_indices: 2-D. `N x R` matrix with the indices of non-empty values in a
+// SparseTensor, possibly not in canonical ordering.
+// sp_values: 1-D. `N` non-empty values corresponding to `sp_indices`.
+// sp_shape: 1-D. Shape of the input SparseTensor.
+// dense: `R`-D. The dense Tensor operand.
//
-// Returns One or more tensors that were dequeued as a tuple.
-func QueueDequeueV2(scope *Scope, handle tf.Output, component_types []tf.DataType, optional ...QueueDequeueV2Attr) (components []tf.Output) {
+// Returns 1-D. The `N` values that are operated on.
+func SparseDenseCwiseDiv(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output, dense tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"component_types": component_types}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "QueueDequeueV2",
+ Type: "SparseDenseCwiseDiv",
Input: []tf.Input{
- handle,
+ sp_indices, sp_values, sp_shape, dense,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
- if scope.Err() != nil {
- return
- }
- var idx int
- var err error
- if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
- scope.UpdateErr("QueueDequeueV2", err)
- return
- }
- return components
-}
-
-// ParseSingleSequenceExampleAttr is an optional argument to ParseSingleSequenceExample.
-type ParseSingleSequenceExampleAttr func(optionalAttr)
-
-// ParseSingleSequenceExampleContextSparseTypes sets the optional context_sparse_types attribute to value.
-//
-// value: A list of Ncontext_sparse types; the data types of data in
-// each context Feature given in context_sparse_keys.
-// Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),
-// DT_INT64 (Int64List), and DT_STRING (BytesList).
-// If not specified, defaults to <>
-//
-// REQUIRES: len(value) >= 0
-func ParseSingleSequenceExampleContextSparseTypes(value []tf.DataType) ParseSingleSequenceExampleAttr {
- return func(m optionalAttr) {
- m["context_sparse_types"] = value
- }
+ return op.Output(0)
}
-// ParseSingleSequenceExampleFeatureListDenseTypes sets the optional feature_list_dense_types attribute to value.
-// If not specified, defaults to <>
-//
-// REQUIRES: len(value) >= 0
-func ParseSingleSequenceExampleFeatureListDenseTypes(value []tf.DataType) ParseSingleSequenceExampleAttr {
- return func(m optionalAttr) {
- m["feature_list_dense_types"] = value
- }
-}
+// ResourceApplyMomentumAttr is an optional argument to ResourceApplyMomentum.
+type ResourceApplyMomentumAttr func(optionalAttr)
-// ParseSingleSequenceExampleContextDenseShapes sets the optional context_dense_shapes attribute to value.
-//
-// value: A list of Ncontext_dense shapes; the shapes of data in
-// each context Feature given in context_dense_keys.
-// The number of elements in the Feature corresponding to context_dense_key[j]
-// must always equal context_dense_shapes[j].NumEntries().
-// The shape of context_dense_values[j] will match context_dense_shapes[j].
-// If not specified, defaults to <>
+// ResourceApplyMomentumUseLocking sets the optional use_locking attribute to value.
//
-// REQUIRES: len(value) >= 0
-func ParseSingleSequenceExampleContextDenseShapes(value []tf.Shape) ParseSingleSequenceExampleAttr {
+// value: If `True`, updating of the var and accum tensors will be protected
+// by a lock; otherwise the behavior is undefined, but may exhibit less
+// contention.
+// If not specified, defaults to false
+func ResourceApplyMomentumUseLocking(value bool) ResourceApplyMomentumAttr {
return func(m optionalAttr) {
- m["context_dense_shapes"] = value
+ m["use_locking"] = value
}
}
-// ParseSingleSequenceExampleFeatureListSparseTypes sets the optional feature_list_sparse_types attribute to value.
-//
-// value: A list of Nfeature_list_sparse types; the data types
-// of data in each FeatureList given in feature_list_sparse_keys.
-// Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),
-// DT_INT64 (Int64List), and DT_STRING (BytesList).
-// If not specified, defaults to <>
+// ResourceApplyMomentumUseNesterov sets the optional use_nesterov attribute to value.
//
-// REQUIRES: len(value) >= 0
-func ParseSingleSequenceExampleFeatureListSparseTypes(value []tf.DataType) ParseSingleSequenceExampleAttr {
+// value: If `True`, the tensor passed to compute grad will be
+// var - lr * momentum * accum, so in the end, the var you get is actually
+// var - lr * momentum * accum.
+// If not specified, defaults to false
+func ResourceApplyMomentumUseNesterov(value bool) ResourceApplyMomentumAttr {
return func(m optionalAttr) {
- m["feature_list_sparse_types"] = value
+ m["use_nesterov"] = value
}
}
-// ParseSingleSequenceExampleFeatureListDenseShapes sets the optional feature_list_dense_shapes attribute to value.
+// Update '*var' according to the momentum scheme.
//
-// value: A list of Nfeature_list_dense shapes; the shapes of
-// data in each FeatureList given in feature_list_dense_keys.
-// The shape of each Feature in the FeatureList corresponding to
-// feature_list_dense_key[j] must always equal
-// feature_list_dense_shapes[j].NumEntries().
-// If not specified, defaults to <>
+// Set use_nesterov = True if you want to use Nesterov momentum.
//
-// REQUIRES: len(value) >= 0
-func ParseSingleSequenceExampleFeatureListDenseShapes(value []tf.Shape) ParseSingleSequenceExampleAttr {
- return func(m optionalAttr) {
- m["feature_list_dense_shapes"] = value
- }
-}
-
-// Transforms a scalar brain.SequenceExample proto (as strings) into typed tensors.
+// accum = accum * momentum + grad
+// var -= lr * accum
//
// Arguments:
-// serialized: A scalar containing a binary serialized SequenceExample proto.
-// feature_list_dense_missing_assumed_empty: A vector listing the
-// FeatureList keys which may be missing from the SequenceExample. If the
-// associated FeatureList is missing, it is treated as empty. By default,
-// any FeatureList not listed in this vector must exist in the SequenceExample.
-// context_sparse_keys: A list of Ncontext_sparse string Tensors (scalars).
-// The keys expected in the Examples' features associated with context_sparse
-// values.
-// context_dense_keys: A list of Ncontext_dense string Tensors (scalars).
-// The keys expected in the SequenceExamples' context features associated with
-// dense values.
-// feature_list_sparse_keys: A list of Nfeature_list_sparse string Tensors
-// (scalars). The keys expected in the FeatureLists associated with sparse
-// values.
-// feature_list_dense_keys: A list of Nfeature_list_dense string Tensors (scalars).
-// The keys expected in the SequenceExamples' feature_lists associated
-// with lists of dense values.
-// context_dense_defaults: A list of Ncontext_dense Tensors (some may be empty).
-// context_dense_defaults[j] provides default values
-// when the SequenceExample's context map lacks context_dense_key[j].
-// If an empty Tensor is provided for context_dense_defaults[j],
-// then the Feature context_dense_keys[j] is required.
-// The input type is inferred from context_dense_defaults[j], even when it's
-// empty. If context_dense_defaults[j] is not empty, its shape must match
-// context_dense_shapes[j].
-// debug_name: A scalar containing the name of the serialized proto.
-// May contain, for example, table key (descriptive) name for the
-// corresponding serialized proto. This is purely useful for debugging
-// purposes, and the presence of values here has no effect on the output.
-// May also be an empty scalar if no name is available.
-func ParseSingleSequenceExample(scope *Scope, serialized tf.Output, feature_list_dense_missing_assumed_empty tf.Output, context_sparse_keys []tf.Output, context_dense_keys []tf.Output, feature_list_sparse_keys []tf.Output, feature_list_dense_keys []tf.Output, context_dense_defaults []tf.Output, debug_name tf.Output, optional ...ParseSingleSequenceExampleAttr) (context_sparse_indices []tf.Output, context_sparse_values []tf.Output, context_sparse_shapes []tf.Output, context_dense_values []tf.Output, feature_list_sparse_indices []tf.Output, feature_list_sparse_values []tf.Output, feature_list_sparse_shapes []tf.Output, feature_list_dense_values []tf.Output) {
+// var_: Should be from a Variable().
+// accum: Should be from a Variable().
+// lr: Scaling factor. Must be a scalar.
+// grad: The gradient.
+// momentum: Momentum. Must be a scalar.
+//
+// Returns the created operation.
+func ResourceApplyMomentum(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, momentum tf.Output, optional ...ResourceApplyMomentumAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
@@ -10832,105 +10986,91 @@ func ParseSingleSequenceExample(scope *Scope, serialized tf.Output, feature_list
a(attrs)
}
opspec := tf.OpSpec{
- Type: "ParseSingleSequenceExample",
+ Type: "ResourceApplyMomentum",
Input: []tf.Input{
- serialized, feature_list_dense_missing_assumed_empty, tf.OutputList(context_sparse_keys), tf.OutputList(context_dense_keys), tf.OutputList(feature_list_sparse_keys), tf.OutputList(feature_list_dense_keys), tf.OutputList(context_dense_defaults), debug_name,
+ var_, accum, lr, grad, momentum,
},
Attrs: attrs,
}
- op := scope.AddOperation(opspec)
+ return scope.AddOperation(opspec)
+}
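A sketch (not in the diff) of a single momentum step on a scalar resource variable, assuming a `Scope` `s`; `VarHandleOp` and `AssignVariableOp` are used for setup, and the variables must be initialized before `step` runs:

```go
v := op.VarHandleOp(s, tf.Float, tf.ScalarShape())
accum := op.VarHandleOp(s, tf.Float, tf.ScalarShape())
initV := op.AssignVariableOp(s, v, op.Const(s, float32(1)))
initA := op.AssignVariableOp(s, accum, op.Const(s, float32(0)))
step := op.ResourceApplyMomentum(s, v, accum,
	op.Const(s, float32(0.1)),  // lr
	op.Const(s, float32(0.25)), // grad
	op.Const(s, float32(0.9)),  // momentum
	op.ResourceApplyMomentumUseNesterov(true),
)
// Run initV and initA once, then pass step as a Session.Run target per iteration.
_ = []*tf.Operation{initV, initA, step}
```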
+
+// Returns the truth value of (x >= y) element-wise.
+//
+// *NOTE*: `GreaterEqual` supports broadcasting. More about broadcasting
+// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+func GreaterEqual(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
if scope.Err() != nil {
return
}
- var idx int
- var err error
- if context_sparse_indices, idx, err = makeOutputList(op, idx, "context_sparse_indices"); err != nil {
- scope.UpdateErr("ParseSingleSequenceExample", err)
- return
- }
- if context_sparse_values, idx, err = makeOutputList(op, idx, "context_sparse_values"); err != nil {
- scope.UpdateErr("ParseSingleSequenceExample", err)
- return
- }
- if context_sparse_shapes, idx, err = makeOutputList(op, idx, "context_sparse_shapes"); err != nil {
- scope.UpdateErr("ParseSingleSequenceExample", err)
- return
- }
- if context_dense_values, idx, err = makeOutputList(op, idx, "context_dense_values"); err != nil {
- scope.UpdateErr("ParseSingleSequenceExample", err)
- return
- }
- if feature_list_sparse_indices, idx, err = makeOutputList(op, idx, "feature_list_sparse_indices"); err != nil {
- scope.UpdateErr("ParseSingleSequenceExample", err)
- return
- }
- if feature_list_sparse_values, idx, err = makeOutputList(op, idx, "feature_list_sparse_values"); err != nil {
- scope.UpdateErr("ParseSingleSequenceExample", err)
- return
- }
- if feature_list_sparse_shapes, idx, err = makeOutputList(op, idx, "feature_list_sparse_shapes"); err != nil {
- scope.UpdateErr("ParseSingleSequenceExample", err)
- return
- }
- if feature_list_dense_values, idx, err = makeOutputList(op, idx, "feature_list_dense_values"); err != nil {
- scope.UpdateErr("ParseSingleSequenceExample", err)
- return
+ opspec := tf.OpSpec{
+ Type: "GreaterEqual",
+ Input: []tf.Input{
+ x, y,
+ },
}
- return context_sparse_indices, context_sparse_values, context_sparse_shapes, context_dense_values, feature_list_sparse_indices, feature_list_sparse_values, feature_list_sparse_shapes, feature_list_dense_values
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
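A short sketch (not in the diff) showing the broadcasting behavior, assuming a `Scope` `s`:

```go
x := op.Const(s, []float32{1, 2, 3})
z := op.GreaterEqual(s, x, op.Const(s, float32(2))) // scalar broadcasts: [false true true]
_ = z // dtype bool
```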
-// RandomGammaAttr is an optional argument to RandomGamma.
-type RandomGammaAttr func(optionalAttr)
+// Conv3DAttr is an optional argument to Conv3D.
+type Conv3DAttr func(optionalAttr)
-// RandomGammaSeed sets the optional seed attribute to value.
+// Conv3DDataFormat sets the optional data_format attribute to value.
//
-// value: If either `seed` or `seed2` are set to be non-zero, the random number
-// generator is seeded by the given seed. Otherwise, it is seeded by a
-// random seed.
-// If not specified, defaults to 0
-func RandomGammaSeed(value int64) RandomGammaAttr {
+// value: The data format of the input and output data. With the
+// default format "NDHWC", the data is stored in the order of:
+// [batch, in_depth, in_height, in_width, in_channels].
+// Alternatively, the format could be "NCDHW", the data storage order is:
+// [batch, in_channels, in_depth, in_height, in_width].
+// If not specified, defaults to "NDHWC"
+func Conv3DDataFormat(value string) Conv3DAttr {
return func(m optionalAttr) {
- m["seed"] = value
+ m["data_format"] = value
}
}
-// RandomGammaSeed2 sets the optional seed2 attribute to value.
+// Conv3DDilations sets the optional dilations attribute to value.
//
-// value: A second seed to avoid seed collision.
-// If not specified, defaults to 0
-func RandomGammaSeed2(value int64) RandomGammaAttr {
+// value: 1-D tensor of length 5. The dilation factor for each dimension of
+// `input`. If set to k > 1, there will be k-1 skipped cells between each
+// filter element on that dimension. The dimension order is determined by the
+// value of `data_format`, see above for details. Dilations in the batch and
+// depth dimensions must be 1.
+// If not specified, defaults to <i:1 i:1 i:1 i:1 i:1 >
+func Conv3DDilations(value []int64) Conv3DAttr {
return func(m optionalAttr) {
- m["seed2"] = value
+ m["dilations"] = value
}
}
-// Outputs random values from the Gamma distribution(s) described by alpha.
+// Computes a 3-D convolution given 5-D `input` and `filter` tensors.
//
-// This op uses the algorithm by Marsaglia et al. to acquire samples via
-// transformation-rejection from pairs of uniform and normal random variables.
-// See http://dl.acm.org/citation.cfm?id=358414
+// In signal processing, cross-correlation is a measure of similarity of
+// two waveforms as a function of a time-lag applied to one of them. This
+// is also known as a sliding dot product or sliding inner-product.
//
-// Arguments:
-// shape: 1-D integer tensor. Shape of independent samples to draw from each
-// distribution described by the shape parameters given in alpha.
-// alpha: A tensor in which each scalar is a "shape" parameter describing the
-// associated gamma distribution.
+// Our Conv3D implements a form of cross-correlation.
//
-// Returns A tensor with shape `shape + shape(alpha)`. Each slice
-// `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for
-// `alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha.
-func RandomGamma(scope *Scope, shape tf.Output, alpha tf.Output, optional ...RandomGammaAttr) (output tf.Output) {
+// Arguments:
+// input: Shape `[batch, in_depth, in_height, in_width, in_channels]`.
+// filter: Shape `[filter_depth, filter_height, filter_width, in_channels,
+// out_channels]`. `in_channels` must match between `input` and `filter`.
+// strides: 1-D tensor of length 5. The stride of the sliding window for each
+// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
+// padding: The type of padding algorithm to use.
+func Conv3D(scope *Scope, input tf.Output, filter tf.Output, strides []int64, padding string, optional ...Conv3DAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"strides": strides, "padding": padding}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "RandomGamma",
+ Type: "Conv3D",
Input: []tf.Input{
- shape, alpha,
+ input, filter,
},
Attrs: attrs,
}
@@ -10938,339 +11078,366 @@ func RandomGamma(scope *Scope, shape tf.Output, alpha tf.Output, optional ...Ran
return op.Output(0)
}
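A sketch (not in the diff) of a `Conv3D` call on placeholder volumes, assuming a `Scope` `s`; shapes and padding are illustrative:

```go
// One 4x4x4 single-channel volume convolved with one 2x2x2 filter.
input := op.Placeholder(s, tf.Float, op.PlaceholderShape(tf.MakeShape(1, 4, 4, 4, 1)))
filter := op.Placeholder(s, tf.Float, op.PlaceholderShape(tf.MakeShape(2, 2, 2, 1, 1)))
out := op.Conv3D(s, input, filter,
	[]int64{1, 1, 1, 1, 1}, // strides[0] and strides[4] must be 1
	"VALID",
	op.Conv3DDataFormat("NDHWC"),
)
_ = out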
-// Returns the element-wise sum of a list of tensors.
-//
-// `tf.accumulate_n_v2` performs the same operation as `tf.add_n`, but does not
-// wait for all of its inputs to be ready before beginning to sum. This can
-// save memory if inputs are ready at different times, since minimum temporary
-// storage is proportional to the output size rather than the inputs size.
+// Adds up a SparseTensor and a dense Tensor, using these special rules:
//
-// Unlike the original `accumulate_n`, `accumulate_n_v2` is differentiable.
+// (1) Broadcasts the dense side to have the same shape as the sparse side, if
+// eligible;
+// (2) Then, only the dense values pointed to by the indices of the SparseTensor
+// participate in the cwise addition.
//
-// Returns a `Tensor` of same shape and type as the elements of `inputs`.
+// By these rules, the result is a logical SparseTensor with exactly the same
+// indices and shape, but possibly with different non-zero values. The output of
+// this Op is the resultant non-zero values.
//
// Arguments:
-// inputs: A list of `Tensor` objects, each with same shape and type.
-// shape: Shape of elements of `inputs`.
-func AccumulateNV2(scope *Scope, inputs []tf.Output, shape tf.Shape) (sum tf.Output) {
+// sp_indices: 2-D. `N x R` matrix with the indices of non-empty values in a
+// SparseTensor, possibly not in canonical ordering.
+// sp_values: 1-D. `N` non-empty values corresponding to `sp_indices`.
+// sp_shape: 1-D. Shape of the input SparseTensor.
+// dense: `R`-D. The dense Tensor operand.
+//
+// Returns 1-D. The `N` values that are operated on.
+func SparseDenseCwiseAdd(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output, dense tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"shape": shape}
opspec := tf.OpSpec{
- Type: "AccumulateNV2",
+ Type: "SparseDenseCwiseAdd",
Input: []tf.Input{
- tf.OutputList(inputs),
+ sp_indices, sp_values, sp_shape, dense,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
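A sketch (not in the diff) of the special addition rules above, assuming a `Scope` `s`:

```go
// Sparse 2x2 with one nonzero at [0,1] = 1, added to a dense 2x2 operand.
spIdx := op.Const(s, [][]int64{{0, 1}})
spVals := op.Const(s, []float32{1})
spShape := op.Const(s, []int64{2, 2})
dense := op.Const(s, [][]float32{{10, 20}, {30, 40}})
vals := op.SparseDenseCwiseAdd(s, spIdx, spVals, spShape, dense)
// Only the dense value at [0,1] participates: vals == [21].
_ = vals
```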
-// Computes the gradient for the inverse of `x` wrt its input.
+// Read an element from the TensorArray into output `value`.
//
-// Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`
-// is the corresponding input gradient.
-func ReciprocalGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
+// Arguments:
+// handle: The handle to a TensorArray.
+//
+// flow_in: A float scalar that enforces proper chaining of operations.
+// dtype: The type of the elem that is returned.
+//
+// Returns The tensor that is read from the TensorArray.
+func TensorArrayReadV3(scope *Scope, handle tf.Output, index tf.Output, flow_in tf.Output, dtype tf.DataType) (value tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"dtype": dtype}
opspec := tf.OpSpec{
- Type: "ReciprocalGrad",
+ Type: "TensorArrayReadV3",
Input: []tf.Input{
- y, dy,
+ handle, index, flow_in,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
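A sketch (not in the diff) of the write/read flow chaining, assuming a `Scope` `s`:

```go
// Create a size-2 float TensorArray, write index 0, then read it back.
ta, flow := op.TensorArrayV3(s, op.Const(s, int32(2)), tf.Float)
flow1 := op.TensorArrayWriteV3(s, ta, op.Const(s, int32(0)), op.Const(s, float32(3.5)), flow)
val := op.TensorArrayReadV3(s, ta, op.Const(s, int32(0)), flow1, tf.Float)
_ = val // 3.5 once the graph is run
```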
-// Convert JSON-encoded Example records to binary protocol buffer strings.
+// EncodePngAttr is an optional argument to EncodePng.
+type EncodePngAttr func(optionalAttr)
+
+// EncodePngCompression sets the optional compression attribute to value.
//
-// This op translates a tensor containing Example records, encoded using
-// the [standard JSON
-// mapping](https://developers.google.com/protocol-buffers/docs/proto3#json),
-// into a tensor containing the same records encoded as binary protocol
-// buffers. The resulting tensor can then be fed to any of the other
-// Example-parsing ops.
+// value: Compression level.
+// If not specified, defaults to -1
+func EncodePngCompression(value int64) EncodePngAttr {
+ return func(m optionalAttr) {
+ m["compression"] = value
+ }
+}
+
+// PNG-encode an image.
+//
+// `image` is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]`
+// where `channels` is:
+//
+// * 1: for grayscale.
+// * 2: for grayscale + alpha.
+// * 3: for RGB.
+// * 4: for RGBA.
+//
+// The ZLIB compression level, `compression`, can be -1 for the PNG-encoder
+// default or a value from 0 to 9. 9 is the highest compression level, generating
+// the smallest output, but is slower.
//
// Arguments:
-// json_examples: Each string is a JSON object serialized according to the JSON
-// mapping of the Example proto.
+// image: 3-D with shape `[height, width, channels]`.
//
-// Returns Each string is a binary Example protocol buffer corresponding
-// to the respective element of `json_examples`.
-func DecodeJSONExample(scope *Scope, json_examples tf.Output) (binary_examples tf.Output) {
+// Returns 0-D. PNG-encoded image.
+func EncodePng(scope *Scope, image tf.Output, optional ...EncodePngAttr) (contents tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "DecodeJSONExample",
+ Type: "EncodePng",
Input: []tf.Input{
- json_examples,
+ image,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
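A sketch (not in the diff), assuming a `Scope` `s`:

```go
// A 1x2 grayscale uint8 image, encoded at the highest compression level.
img := op.Const(s, [][][]uint8{{{0}, {255}}})
png := op.EncodePng(s, img, op.EncodePngCompression(9))
_ = png // 0-D string tensor holding the PNG bytes
```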
-// Adds sparse updates to the variable referenced by `resource`.
-//
-// This operation computes
-//
-// # Scalar indices
-// ref[indices, ...] += updates[...]
-//
-// # Vector indices (for each i)
-// ref[indices[i], ...] += updates[i, ...]
-//
-// # High rank indices (for each i, ..., j)
-// ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
+// DataFormatVecPermuteAttr is an optional argument to DataFormatVecPermute.
+type DataFormatVecPermuteAttr func(optionalAttr)
+
+// DataFormatVecPermuteSrcFormat sets the optional src_format attribute to value.
//
-// Duplicate entries are handled correctly: if multiple `indices` reference
-// the same location, their contributions add.
+// value: source data format.
+// If not specified, defaults to "NHWC"
+func DataFormatVecPermuteSrcFormat(value string) DataFormatVecPermuteAttr {
+ return func(m optionalAttr) {
+ m["src_format"] = value
+ }
+}
+
+// DataFormatVecPermuteDstFormat sets the optional dst_format attribute to value.
//
-// Requires `updates.shape = indices.shape + ref.shape[1:]`.
+// value: destination data format.
+// If not specified, defaults to "NCHW"
+func DataFormatVecPermuteDstFormat(value string) DataFormatVecPermuteAttr {
+ return func(m optionalAttr) {
+ m["dst_format"] = value
+ }
+}
+
+// Returns the permuted vector/tensor in the destination data format given the
+// one in the source data format.
//
-// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
-// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
-// </div>
//
// Arguments:
-// resource: Should be from a `Variable` node.
-// indices: A tensor of indices into the first dimension of `ref`.
-// updates: A tensor of updated values to add to `ref`.
+// x: Vector of size 4 or Tensor of shape (4, 2) in source data format.
//
-// Returns the created operation.
-func ResourceScatterAdd(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
+// Returns Vector of size 4 or Tensor of shape (4, 2) in destination data format.
+func DataFormatVecPermute(scope *Scope, x tf.Output, optional ...DataFormatVecPermuteAttr) (y tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "ResourceScatterAdd",
+ Type: "DataFormatVecPermute",
Input: []tf.Input{
- resource, indices, updates,
+ x,
},
+ Attrs: attrs,
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
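A sketch (not in the diff), assuming a `Scope` `s`:

```go
// Permute an NHWC shape vector into NCHW order.
nhwc := op.Const(s, []int32{8, 224, 224, 3}) // [batch, height, width, channels]
nchw := op.DataFormatVecPermute(s, nhwc,
	op.DataFormatVecPermuteSrcFormat("NHWC"),
	op.DataFormatVecPermuteDstFormat("NCHW"),
) // => [8, 3, 224, 224]
_ = nchw
```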
-// Eagerly executes a python function to compute func(input)->output. The
+// Returns element-wise integer closest to x.
//
-// semantics of the input, output, and attributes are the same as those for
-// PyFunc.
-func EagerPyFunc(scope *Scope, input []tf.Output, token string, Tout []tf.DataType) (output []tf.Output) {
+// If the result is midway between two representable values,
+// the even representable is chosen.
+// For example:
+//
+// ```
+// rint(-1.5) ==> -2.0
+// rint(0.5000001) ==> 1.0
+// rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.]
+// ```
+func Rint(scope *Scope, x tf.Output) (y tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"token": token, "Tout": Tout}
opspec := tf.OpSpec{
- Type: "EagerPyFunc",
+ Type: "Rint",
Input: []tf.Input{
- tf.OutputList(input),
+ x,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
- if scope.Err() != nil {
- return
- }
- var idx int
- var err error
- if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
- scope.UpdateErr("EagerPyFunc", err)
- return
- }
- return output
+ return op.Output(0)
}
-// DepthwiseConv2dNativeBackpropInputAttr is an optional argument to DepthwiseConv2dNativeBackpropInput.
-type DepthwiseConv2dNativeBackpropInputAttr func(optionalAttr)
+// OrderedMapUnstageNoKeyAttr is an optional argument to OrderedMapUnstageNoKey.
+type OrderedMapUnstageNoKeyAttr func(optionalAttr)
-// DepthwiseConv2dNativeBackpropInputDataFormat sets the optional data_format attribute to value.
+// OrderedMapUnstageNoKeyCapacity sets the optional capacity attribute to value.
+// If not specified, defaults to 0
//
-// value: Specify the data format of the input and output data. With the
-// default format "NHWC", the data is stored in the order of:
-// [batch, height, width, channels].
-// Alternatively, the format could be "NCHW", the data storage order of:
-// [batch, channels, height, width].
-// If not specified, defaults to "NHWC"
-func DepthwiseConv2dNativeBackpropInputDataFormat(value string) DepthwiseConv2dNativeBackpropInputAttr {
+// REQUIRES: value >= 0
+func OrderedMapUnstageNoKeyCapacity(value int64) OrderedMapUnstageNoKeyAttr {
return func(m optionalAttr) {
- m["data_format"] = value
+ m["capacity"] = value
}
}
-// DepthwiseConv2dNativeBackpropInputDilations sets the optional dilations attribute to value.
+// OrderedMapUnstageNoKeyMemoryLimit sets the optional memory_limit attribute to value.
+// If not specified, defaults to 0
//
-// value: 1-D tensor of length 4. The dilation factor for each dimension of
-// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
-// element on that dimension. The dimension order is determined by the value of
-// `data_format`, see above for details. Dilations in the batch and depth
-// dimensions must be 1.
-// If not specified, defaults to <i:1 i:1 i:1 i:1 >
-func DepthwiseConv2dNativeBackpropInputDilations(value []int64) DepthwiseConv2dNativeBackpropInputAttr {
+// REQUIRES: value >= 0
+func OrderedMapUnstageNoKeyMemoryLimit(value int64) OrderedMapUnstageNoKeyAttr {
return func(m optionalAttr) {
- m["dilations"] = value
+ m["memory_limit"] = value
}
}
-// Computes the gradients of depthwise convolution with respect to the input.
-//
-// Arguments:
-// input_sizes: An integer vector representing the shape of `input`, based
-// on `data_format`. For example, if `data_format` is 'NHWC' then
-// `input` is a 4-D `[batch, height, width, channels]` tensor.
-// filter: 4-D with shape
-// `[filter_height, filter_width, in_channels, depthwise_multiplier]`.
-// out_backprop: 4-D with shape based on `data_format`.
-// For example, if `data_format` is 'NHWC' then
-// out_backprop shape is `[batch, out_height, out_width, out_channels]`.
-// Gradients w.r.t. the output of the convolution.
-// strides: The stride of the sliding window for each dimension of the input
-// of the convolution.
-// padding: The type of padding algorithm to use.
+// OrderedMapUnstageNoKeyContainer sets the optional container attribute to value.
+// If not specified, defaults to ""
+func OrderedMapUnstageNoKeyContainer(value string) OrderedMapUnstageNoKeyAttr {
+ return func(m optionalAttr) {
+ m["container"] = value
+ }
+}
+
+// OrderedMapUnstageNoKeySharedName sets the optional shared_name attribute to value.
+// If not specified, defaults to ""
+func OrderedMapUnstageNoKeySharedName(value string) OrderedMapUnstageNoKeyAttr {
+ return func(m optionalAttr) {
+ m["shared_name"] = value
+ }
+}
+
+// Op removes and returns the (key, value) element with the smallest key
+// from the underlying container.
//
-// Returns 4-D with shape according to `data_format`. For example, if
-// `data_format` is 'NHWC', output shape is `[batch, in_height,
-// in_width, in_channels]`. Gradient w.r.t. the input of the
-// convolution.
-func DepthwiseConv2dNativeBackpropInput(scope *Scope, input_sizes tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...DepthwiseConv2dNativeBackpropInputAttr) (output tf.Output) {
+// If the underlying container does not contain elements, the op will block
+// until it does.
+func OrderedMapUnstageNoKey(scope *Scope, indices tf.Output, dtypes []tf.DataType, optional ...OrderedMapUnstageNoKeyAttr) (key tf.Output, values []tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"strides": strides, "padding": padding}
+ attrs := map[string]interface{}{"dtypes": dtypes}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "DepthwiseConv2dNativeBackpropInput",
+ Type: "OrderedMapUnstageNoKey",
Input: []tf.Input{
- input_sizes, filter, out_backprop,
+ indices,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// Creates a dataset with a range of values. Corresponds to python's xrange.
-//
-// Arguments:
-// start: corresponds to start in python's xrange().
-// stop: corresponds to stop in python's xrange().
-// step: corresponds to step in python's xrange().
-//
-//
-func RangeDataset(scope *Scope, start tf.Output, stop tf.Output, step tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
- opspec := tf.OpSpec{
- Type: "RangeDataset",
- Input: []tf.Input{
- start, stop, step,
- },
- Attrs: attrs,
+ var idx int
+ var err error
+ key = op.Output(idx)
+ if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
+ scope.UpdateErr("OrderedMapUnstageNoKey", err)
+ return
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return key, values
}
-// Saves tensors in V2 checkpoint format.
+// MaxPool3DGradGradAttr is an optional argument to MaxPool3DGradGrad.
+type MaxPool3DGradGradAttr func(optionalAttr)
+
+// MaxPool3DGradGradDataFormat sets the optional data_format attribute to value.
//
-// By default, saves the named tensors in full. If the caller wishes to save
-// specific slices of full tensors, "shape_and_slices" should be non-empty strings
-// and correspondingly well-formed.
+// value: The data format of the input and output data. With the
+// default format "NDHWC", the data is stored in the order of:
+// [batch, in_depth, in_height, in_width, in_channels].
+// Alternatively, the format could be "NCDHW", the data storage order is:
+// [batch, in_channels, in_depth, in_height, in_width].
+// If not specified, defaults to "NDHWC"
+func MaxPool3DGradGradDataFormat(value string) MaxPool3DGradGradAttr {
+ return func(m optionalAttr) {
+ m["data_format"] = value
+ }
+}
+
+// Computes second-order gradients of the maxpooling function.
//
// Arguments:
-// prefix: Must have a single element. The prefix of the V2 checkpoint to which we
-// write the tensors.
-// tensor_names: shape {N}. The names of the tensors to be saved.
-// shape_and_slices: shape {N}. The slice specs of the tensors to be saved.
-// Empty strings indicate that they are non-partitioned tensors.
-// tensors: `N` tensors to save.
+// orig_input: The original input tensor.
+// orig_output: The original output tensor.
+// grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.
+// ksize: 1-D tensor of length 5. The size of the window for each dimension of
+// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
+// strides: 1-D tensor of length 5. The stride of the sliding window for each
+// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
+// padding: The type of padding algorithm to use.
//
-// Returns the created operation.
-func SaveV2(scope *Scope, prefix tf.Output, tensor_names tf.Output, shape_and_slices tf.Output, tensors []tf.Output) (o *tf.Operation) {
+// Returns Gradients of gradients w.r.t. the input to `max_pool`.
+func MaxPool3DGradGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPool3DGradGradAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "SaveV2",
+ Type: "MaxPool3DGradGrad",
Input: []tf.Input{
- prefix, tensor_names, shape_and_slices, tf.OutputList(tensors),
+ orig_input, orig_output, grad,
},
+ Attrs: attrs,
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// MatrixTriangularSolveAttr is an optional argument to MatrixTriangularSolve.
-type MatrixTriangularSolveAttr func(optionalAttr)
+// Conv3DBackpropFilterV2Attr is an optional argument to Conv3DBackpropFilterV2.
+type Conv3DBackpropFilterV2Attr func(optionalAttr)
-// MatrixTriangularSolveLower sets the optional lower attribute to value.
+// Conv3DBackpropFilterV2DataFormat sets the optional data_format attribute to value.
//
-// value: Boolean indicating whether the innermost matrices in `matrix` are
-// lower or upper triangular.
-// If not specified, defaults to true
-func MatrixTriangularSolveLower(value bool) MatrixTriangularSolveAttr {
+// value: The data format of the input and output data. With the
+// default format "NDHWC", the data is stored in the order of:
+// [batch, in_depth, in_height, in_width, in_channels].
+// Alternatively, the format could be "NCDHW", the data storage order is:
+// [batch, in_channels, in_depth, in_height, in_width].
+// If not specified, defaults to "NDHWC"
+func Conv3DBackpropFilterV2DataFormat(value string) Conv3DBackpropFilterV2Attr {
return func(m optionalAttr) {
- m["lower"] = value
+ m["data_format"] = value
}
}
-// MatrixTriangularSolveAdjoint sets the optional adjoint attribute to value.
-//
-// value: Boolean indicating whether to solve with `matrix` or its (block-wise)
-// adjoint.
+// Conv3DBackpropFilterV2Dilations sets the optional dilations attribute to value.
//
-// @compatibility(numpy)
-// Equivalent to np.linalg.triangular_solve
-// @end_compatibility
-// If not specified, defaults to false
-func MatrixTriangularSolveAdjoint(value bool) MatrixTriangularSolveAttr {
+// value: 1-D tensor of length 5. The dilation factor for each dimension of
+// `input`. If set to k > 1, there will be k-1 skipped cells between each
+// filter element on that dimension. The dimension order is determined by the
+// value of `data_format`, see above for details. Dilations in the batch and
+// depth dimensions must be 1.
+// If not specified, defaults to [1, 1, 1, 1, 1]
+func Conv3DBackpropFilterV2Dilations(value []int64) Conv3DBackpropFilterV2Attr {
return func(m optionalAttr) {
- m["adjoint"] = value
+ m["dilations"] = value
}
}
-// Solves systems of linear equations with upper or lower triangular matrices by
-//
-// backsubstitution.
-//
-// `matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form
-// square matrices. If `lower` is `True` then the strictly upper triangular part
-// of each inner-most matrix is assumed to be zero and not accessed.
-// If `lower` is False then the strictly lower triangular part of each inner-most
-// matrix is assumed to be zero and not accessed.
-// `rhs` is a tensor of shape `[..., M, K]`.
-//
-// The output is a tensor of shape `[..., M, K]`. If `adjoint` is
-// `True` then the innermost matrices in `output` satisfy matrix equations
-// `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
-// If `adjoint` is `False` then the strictly then the innermost matrices in
-// `output` satisfy matrix equations
-// `adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`.
+// Computes the gradients of 3-D convolution with respect to the filter.
//
// Arguments:
-// matrix: Shape is `[..., M, M]`.
-// rhs: Shape is `[..., M, K]`.
-//
-// Returns Shape is `[..., M, K]`.
-func MatrixTriangularSolve(scope *Scope, matrix tf.Output, rhs tf.Output, optional ...MatrixTriangularSolveAttr) (output tf.Output) {
+// input: Shape `[batch, depth, rows, cols, in_channels]`.
+// filter_sizes: An integer vector representing the tensor shape of `filter`,
+// where `filter` is a 5-D
+// `[filter_depth, filter_height, filter_width, in_channels, out_channels]`
+// tensor.
+// out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
+// out_channels]`.
+// strides: 1-D tensor of length 5. The stride of the sliding window for each
+// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
+// padding: The type of padding algorithm to use.
+func Conv3DBackpropFilterV2(scope *Scope, input tf.Output, filter_sizes tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv3DBackpropFilterV2Attr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"strides": strides, "padding": padding}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "MatrixTriangularSolve",
+ Type: "Conv3DBackpropFilterV2",
Input: []tf.Input{
- matrix, rhs,
+ input, filter_sizes, out_backprop,
},
Attrs: attrs,
}
@@ -11278,321 +11445,329 @@ func MatrixTriangularSolve(scope *Scope, matrix tf.Output, rhs tf.Output, option
return op.Output(0)
}
-// Computes fingerprints of the input strings.
+// Execute a sub graph on a remote processor.
+//
+// The graph specifications (such as the graph itself, input tensors, and
+// output names) are stored as a serialized RemoteFusedGraphExecuteInfo
+// protocol buffer in serialized_remote_fused_graph_execute_info.
+// The specifications will be passed to a dedicated registered
+// remote fused graph executor. The executor will send the graph specifications
+// to a remote processor and execute that graph. The execution results
+// will be passed to consumer nodes as outputs of this node.
//
// Arguments:
-// input: vector of strings to compute fingerprints on.
+// inputs: Arbitrary number of tensors with arbitrary data types
//
-// Returns a (N,2) shaped matrix where N is the number of elements in the input
-// vector. Each row contains the low and high parts of the fingerprint.
-func SdcaFprint(scope *Scope, input tf.Output) (output tf.Output) {
+// serialized_remote_fused_graph_execute_info: Serialized protocol buffer
+// of RemoteFusedGraphExecuteInfo which contains graph specifications.
+//
+// Returns Arbitrary number of tensors with arbitrary data types
+func RemoteFusedGraphExecute(scope *Scope, inputs []tf.Output, Toutputs []tf.DataType, serialized_remote_fused_graph_execute_info string) (outputs []tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"Toutputs": Toutputs, "serialized_remote_fused_graph_execute_info": serialized_remote_fused_graph_execute_info}
opspec := tf.OpSpec{
- Type: "SdcaFprint",
+ Type: "RemoteFusedGraphExecute",
Input: []tf.Input{
- input,
+ tf.OutputList(inputs),
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// SparseMatMulAttr is an optional argument to SparseMatMul.
-type SparseMatMulAttr func(optionalAttr)
-
-// SparseMatMulTransposeA sets the optional transpose_a attribute to value.
-// If not specified, defaults to false
-func SparseMatMulTransposeA(value bool) SparseMatMulAttr {
- return func(m optionalAttr) {
- m["transpose_a"] = value
+ if scope.Err() != nil {
+ return
}
-}
-
-// SparseMatMulTransposeB sets the optional transpose_b attribute to value.
-// If not specified, defaults to false
-func SparseMatMulTransposeB(value bool) SparseMatMulAttr {
- return func(m optionalAttr) {
- m["transpose_b"] = value
+ var idx int
+ var err error
+ if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
+ scope.UpdateErr("RemoteFusedGraphExecute", err)
+ return
}
+ return outputs
}
-// SparseMatMulAIsSparse sets the optional a_is_sparse attribute to value.
-// If not specified, defaults to false
-func SparseMatMulAIsSparse(value bool) SparseMatMulAttr {
+// ThreadUnsafeUnigramCandidateSamplerAttr is an optional argument to ThreadUnsafeUnigramCandidateSampler.
+type ThreadUnsafeUnigramCandidateSamplerAttr func(optionalAttr)
+
+// ThreadUnsafeUnigramCandidateSamplerSeed sets the optional seed attribute to value.
+//
+// value: If either seed or seed2 are set to be non-zero, the random number
+// generator is seeded by the given seed. Otherwise, it is seeded by a
+// random seed.
+// If not specified, defaults to 0
+func ThreadUnsafeUnigramCandidateSamplerSeed(value int64) ThreadUnsafeUnigramCandidateSamplerAttr {
return func(m optionalAttr) {
- m["a_is_sparse"] = value
+ m["seed"] = value
}
}
-// SparseMatMulBIsSparse sets the optional b_is_sparse attribute to value.
-// If not specified, defaults to false
-func SparseMatMulBIsSparse(value bool) SparseMatMulAttr {
+// ThreadUnsafeUnigramCandidateSamplerSeed2 sets the optional seed2 attribute to value.
+//
+// value: A second seed to avoid seed collision.
+// If not specified, defaults to 0
+func ThreadUnsafeUnigramCandidateSamplerSeed2(value int64) ThreadUnsafeUnigramCandidateSamplerAttr {
return func(m optionalAttr) {
- m["b_is_sparse"] = value
+ m["seed2"] = value
}
}
-// Multiply matrix "a" by matrix "b".
+// Generates labels for candidate sampling with a learned unigram distribution.
//
-// The inputs must be two-dimensional matrices and the inner dimension of "a" must
-// match the outer dimension of "b". This op is optimized for the case where at
-// least one of "a" or "b" is sparse. The breakeven for using this versus a dense
-// matrix multiply on one platform was 30% zero values in the sparse matrix.
+// See explanations of candidate sampling and the data formats at
+// go/candidate-sampling.
//
-// The gradient computation of this operation will only take advantage of sparsity
-// in the input gradient when that gradient comes from a Relu.
-func SparseMatMul(scope *Scope, a tf.Output, b tf.Output, optional ...SparseMatMulAttr) (product tf.Output) {
+// For each batch, this op picks a single set of sampled candidate labels.
+//
+// The advantages of sampling candidates per-batch are simplicity and the
+// possibility of efficient dense matrix multiplication. The disadvantage is that
+// the sampled candidates must be chosen independently of the context and of the
+// true labels.
+//
+// Arguments:
+// true_classes: A batch_size * num_true matrix, in which each row contains the
+// IDs of the num_true target_classes in the corresponding original label.
+// num_true: Number of true labels per context.
+// num_sampled: Number of candidates to randomly sample.
+// unique: If unique is true, we sample with rejection, so that all sampled
+// candidates in a batch are unique. This requires some approximation to
+// estimate the post-rejection sampling probabilities.
+// range_max: The sampler will sample integers from the interval [0, range_max).
+//
+// Returns:
+// * A vector of length num_sampled, in which each element is the ID of a
+// sampled candidate.
+// * A batch_size * num_true matrix, representing the number of times each
+// candidate is expected to occur in a batch of sampled candidates. If
+// unique=true, then this is a probability.
+// * A vector of length num_sampled, for each sampled candidate representing
+// the number of times the candidate is expected to occur in a batch of
+// sampled candidates. If unique=true, then this is a probability.
+func ThreadUnsafeUnigramCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...ThreadUnsafeUnigramCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique, "range_max": range_max}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "SparseMatMul",
+ Type: "ThreadUnsafeUnigramCandidateSampler",
Input: []tf.Input{
- a, b,
+ true_classes,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1), op.Output(2)
}
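
A construction sketch for the sampler, assuming the tf/op imports from the first sketch; the helper name, class IDs, and seeds are hypothetical:

func sampleCandidates(s *op.Scope) (sampled, trueExpected, sampledExpected tf.Output) {
	// One example whose true label is class 42 (batch_size=1, num_true=1).
	trueClasses := op.Const(s.SubScope("true_classes"), [][]int64{{42}})
	return op.ThreadUnsafeUnigramCandidateSampler(s, trueClasses,
		1,    // num_true
		10,   // num_sampled
		true, // unique: sample with rejection
		1000, // range_max: candidates are drawn from [0, 1000)
		op.ThreadUnsafeUnigramCandidateSamplerSeed(7),
		op.ThreadUnsafeUnigramCandidateSamplerSeed2(11))
}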
-// SdcaOptimizerAttr is an optional argument to SdcaOptimizer.
-type SdcaOptimizerAttr func(optionalAttr)
+// MaxPoolV2Attr is an optional argument to MaxPoolV2.
+type MaxPoolV2Attr func(optionalAttr)
-// SdcaOptimizerAdaptative sets the optional adaptative attribute to value.
+// MaxPoolV2DataFormat sets the optional data_format attribute to value.
//
-// value: Whether to use Adapative SDCA for the inner loop.
-// If not specified, defaults to false
-func SdcaOptimizerAdaptative(value bool) SdcaOptimizerAttr {
+// value: Specify the data format of the input and output data. With the
+// default format "NHWC", the data is stored in the order of:
+// [batch, in_height, in_width, in_channels].
+// Alternatively, the format could be "NCHW", the data storage order of:
+// [batch, in_channels, in_height, in_width].
+// If not specified, defaults to "NHWC"
+func MaxPoolV2DataFormat(value string) MaxPoolV2Attr {
return func(m optionalAttr) {
- m["adaptative"] = value
+ m["data_format"] = value
}
}
-// Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for
-//
-// linear models with L1 + L2 regularization. As global optimization objective is
-// strongly-convex, the optimizer optimizes the dual objective at each step. The
-// optimizer applies each update one example at a time. Examples are sampled
-// uniformly, and the optimizer is learning rate free and enjoys linear convergence
-// rate.
-//
-// [Proximal Stochastic Dual Coordinate Ascent](http://arxiv.org/pdf/1211.2717v1.pdf).<br>
-// Shai Shalev-Shwartz, Tong Zhang. 2012
-//
-// $$Loss Objective = \sum f_{i} (wx_{i}) + (l2 / 2) * |w|^2 + l1 * |w|$$
-//
-// [Adding vs. Averaging in Distributed Primal-Dual Optimization](http://arxiv.org/abs/1502.03508).<br>
-// Chenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan,
-// Peter Richtarik, Martin Takac. 2015
-//
-// [Stochastic Dual Coordinate Ascent with Adaptive Probabilities](https://arxiv.org/abs/1502.08053).<br>
-// Dominik Csiba, Zheng Qu, Peter Richtarik. 2015
+// Performs max pooling on the input.
//
// Arguments:
-// sparse_example_indices: a list of vectors which contain example indices.
-// sparse_feature_indices: a list of vectors which contain feature indices.
-// sparse_feature_values: a list of vectors which contains feature value
-// associated with each feature group.
-// dense_features: a list of matrices which contains the dense feature values.
-// example_weights: a vector which contains the weight associated with each
-// example.
-// example_labels: a vector which contains the label/target associated with each
-// example.
-// sparse_indices: a list of vectors where each value is the indices which has
-// corresponding weights in sparse_weights. This field maybe omitted for the
-// dense approach.
-// sparse_weights: a list of vectors where each value is the weight associated with
-// a sparse feature group.
-// dense_weights: a list of vectors where the values are the weights associated
-// with a dense feature group.
-// example_state_data: a list of vectors containing the example state data.
-// loss_type: Type of the primal loss. Currently SdcaSolver supports logistic,
-// squared and hinge losses.
-// l1: Symmetric l1 regularization strength.
-// l2: Symmetric l2 regularization strength.
-// num_loss_partitions: Number of partitions of the global loss function.
-// num_inner_iterations: Number of iterations per mini-batch.
+// input: 4-D input to pool over.
+// ksize: The size of the window for each dimension of the input tensor.
+// strides: The stride of the sliding window for each dimension of the
+// input tensor.
+// padding: The type of padding algorithm to use.
//
-// Returns a list of vectors containing the updated example state
-// data.a list of vectors where each value is the delta
-// weights associated with a sparse feature group.a list of vectors where the values are the delta
-// weights associated with a dense feature group.
-func SdcaOptimizer(scope *Scope, sparse_example_indices []tf.Output, sparse_feature_indices []tf.Output, sparse_feature_values []tf.Output, dense_features []tf.Output, example_weights tf.Output, example_labels tf.Output, sparse_indices []tf.Output, sparse_weights []tf.Output, dense_weights []tf.Output, example_state_data tf.Output, loss_type string, l1 float32, l2 float32, num_loss_partitions int64, num_inner_iterations int64, optional ...SdcaOptimizerAttr) (out_example_state_data tf.Output, out_delta_sparse_weights []tf.Output, out_delta_dense_weights []tf.Output) {
+// Returns The max pooled output tensor.
+func MaxPoolV2(scope *Scope, input tf.Output, ksize tf.Output, strides tf.Output, padding string, optional ...MaxPoolV2Attr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"loss_type": loss_type, "l1": l1, "l2": l2, "num_loss_partitions": num_loss_partitions, "num_inner_iterations": num_inner_iterations}
+ attrs := map[string]interface{}{"padding": padding}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "SdcaOptimizer",
+ Type: "MaxPoolV2",
Input: []tf.Input{
- tf.OutputList(sparse_example_indices), tf.OutputList(sparse_feature_indices), tf.OutputList(sparse_feature_values), tf.OutputList(dense_features), example_weights, example_labels, tf.OutputList(sparse_indices), tf.OutputList(sparse_weights), tf.OutputList(dense_weights), example_state_data,
+ input, ksize, strides,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
+// Deprecated. Use TensorArrayReadV3 instead.
+func TensorArrayReadV2(scope *Scope, handle tf.Output, index tf.Output, flow_in tf.Output, dtype tf.DataType) (value tf.Output) {
if scope.Err() != nil {
return
}
- var idx int
- var err error
- out_example_state_data = op.Output(idx)
- if out_delta_sparse_weights, idx, err = makeOutputList(op, idx, "out_delta_sparse_weights"); err != nil {
- scope.UpdateErr("SdcaOptimizer", err)
- return
- }
- if out_delta_dense_weights, idx, err = makeOutputList(op, idx, "out_delta_dense_weights"); err != nil {
- scope.UpdateErr("SdcaOptimizer", err)
- return
+ attrs := map[string]interface{}{"dtype": dtype}
+ opspec := tf.OpSpec{
+ Type: "TensorArrayReadV2",
+ Input: []tf.Input{
+ handle, index, flow_in,
+ },
+ Attrs: attrs,
}
- return out_example_state_data, out_delta_sparse_weights, out_delta_dense_weights
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// Computes the minimum along segments of a tensor.
+// Does nothing. Serves as a control trigger for scheduling.
//
-// Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
-// segments.
+// Only useful as a placeholder for control edges.
//
-// Computes a tensor such that
-// \\(output_i = \min_j(data_j)\\) where `min` is over `j` such
-// that `segment_ids[j] == i`.
+// Returns the created operation.
+func ControlTrigger(scope *Scope) (o *tf.Operation) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "ControlTrigger",
+ }
+ return scope.AddOperation(opspec)
+}
+
+// Batch normalization.
//
-// If the min is empty for a given segment ID `i`, `output[i] = 0`.
+// DEPRECATED at GraphDef version 9: Use tf.nn.batch_normalization()
//
-// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
-// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentMin.png" alt>
-// </div>
+// This op is deprecated. Prefer `tf.nn.batch_normalization`.
//
// Arguments:
-//
-// segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s
-// first dimension. Values should be sorted and can be repeated.
-//
-// Returns Has same shape as data, except for dimension 0 which
-// has size `k`, the number of segments.
-func SegmentMin(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output) {
+// t: A 4D input Tensor.
+// m: A 1D mean Tensor with size matching the last dimension of t.
+// This is the first output from tf.nn.moments,
+// or a saved moving average thereof.
+// v: A 1D variance Tensor with size matching the last dimension of t.
+// This is the second output from tf.nn.moments,
+// or a saved moving average thereof.
+// beta: A 1D beta Tensor with size matching the last dimension of t.
+// An offset to be added to the normalized tensor.
+// gamma: A 1D gamma Tensor with size matching the last dimension of t.
+// If "scale_after_normalization" is true, this tensor will be multiplied
+// with the normalized tensor.
+// variance_epsilon: A small float number to avoid dividing by 0.
+// scale_after_normalization: A bool indicating whether the resulted tensor
+// needs to be multiplied with gamma.
+func BatchNormWithGlobalNormalization(scope *Scope, t tf.Output, m tf.Output, v tf.Output, beta tf.Output, gamma tf.Output, variance_epsilon float32, scale_after_normalization bool) (result tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"variance_epsilon": variance_epsilon, "scale_after_normalization": scale_after_normalization}
opspec := tf.OpSpec{
- Type: "SegmentMin",
+ Type: "BatchNormWithGlobalNormalization",
Input: []tf.Input{
- data, segment_ids,
+ t, m, v, beta, gamma,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// QuantizedResizeBilinearAttr is an optional argument to QuantizedResizeBilinear.
-type QuantizedResizeBilinearAttr func(optionalAttr)
+// MutableDenseHashTableV2Attr is an optional argument to MutableDenseHashTableV2.
+type MutableDenseHashTableV2Attr func(optionalAttr)
-// QuantizedResizeBilinearAlignCorners sets the optional align_corners attribute to value.
+// MutableDenseHashTableV2Container sets the optional container attribute to value.
//
-// value: If true, rescale input by (new_height - 1) / (height - 1), which
-// exactly aligns the 4 corners of images and resized images. If false, rescale
-// by new_height / height. Treat similarly the width dimension.
-// If not specified, defaults to false
-func QuantizedResizeBilinearAlignCorners(value bool) QuantizedResizeBilinearAttr {
+// value: If non-empty, this table is placed in the given container.
+// Otherwise, a default container is used.
+// If not specified, defaults to ""
+func MutableDenseHashTableV2Container(value string) MutableDenseHashTableV2Attr {
return func(m optionalAttr) {
- m["align_corners"] = value
+ m["container"] = value
}
}
-// Resize quantized `images` to `size` using quantized bilinear interpolation.
-//
-// Input images and output images must be quantized types.
-//
-// Arguments:
-// images: 4-D with shape `[batch, height, width, channels]`.
-// size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The
-// new size for the images.
-//
-//
+// MutableDenseHashTableV2SharedName sets the optional shared_name attribute to value.
//
-// Returns 4-D with shape
-// `[batch, new_height, new_width, channels]`.
-func QuantizedResizeBilinear(scope *Scope, images tf.Output, size tf.Output, min tf.Output, max tf.Output, optional ...QuantizedResizeBilinearAttr) (resized_images tf.Output, out_min tf.Output, out_max tf.Output) {
- if scope.Err() != nil {
- return
- }
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
+// value: If non-empty, this table is shared under the given name across
+// multiple sessions.
+// If not specified, defaults to ""
+func MutableDenseHashTableV2SharedName(value string) MutableDenseHashTableV2Attr {
+ return func(m optionalAttr) {
+ m["shared_name"] = value
}
- opspec := tf.OpSpec{
- Type: "QuantizedResizeBilinear",
- Input: []tf.Input{
- images, size, min, max,
- },
- Attrs: attrs,
+}
+
+// MutableDenseHashTableV2UseNodeNameSharing sets the optional use_node_name_sharing attribute to value.
+// If not specified, defaults to false
+func MutableDenseHashTableV2UseNodeNameSharing(value bool) MutableDenseHashTableV2Attr {
+ return func(m optionalAttr) {
+ m["use_node_name_sharing"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
}
-// RestoreAttr is an optional argument to Restore.
-type RestoreAttr func(optionalAttr)
+// MutableDenseHashTableV2ValueShape sets the optional value_shape attribute to value.
+//
+// value: The shape of each value.
+// If not specified, defaults to <> (an empty shape)
+func MutableDenseHashTableV2ValueShape(value tf.Shape) MutableDenseHashTableV2Attr {
+ return func(m optionalAttr) {
+ m["value_shape"] = value
+ }
+}
-// RestorePreferredShard sets the optional preferred_shard attribute to value.
+// MutableDenseHashTableV2InitialNumBuckets sets the optional initial_num_buckets attribute to value.
//
-// value: Index of file to open first if multiple files match
-// `file_pattern`.
-// If not specified, defaults to -1
-func RestorePreferredShard(value int64) RestoreAttr {
+// value: The initial number of hash table buckets. Must be a power
+// of 2.
+// If not specified, defaults to 131072
+func MutableDenseHashTableV2InitialNumBuckets(value int64) MutableDenseHashTableV2Attr {
return func(m optionalAttr) {
- m["preferred_shard"] = value
+ m["initial_num_buckets"] = value
}
}
-// Restores a tensor from checkpoint files.
+// MutableDenseHashTableV2MaxLoadFactor sets the optional max_load_factor attribute to value.
//
-// Reads a tensor stored in one or several files. If there are several files (for
-// instance because a tensor was saved as slices), `file_pattern` may contain
-// wildcard symbols (`*` and `?`) in the filename portion only, not in the
-// directory portion.
+// value: The maximum ratio between number of entries and number of
+// buckets before growing the table. Must be between 0 and 1.
+// If not specified, defaults to 0.8
+func MutableDenseHashTableV2MaxLoadFactor(value float32) MutableDenseHashTableV2Attr {
+ return func(m optionalAttr) {
+ m["max_load_factor"] = value
+ }
+}
+
+// Creates an empty hash table that uses tensors as the backing store.
//
-// If a `file_pattern` matches several files, `preferred_shard` can be used to hint
-// in which file the requested tensor is likely to be found. This op will first
-// open the file at index `preferred_shard` in the list of matching files and try
-// to restore tensors from that file. Only if some tensors or tensor slices are
-// not found in that first file, then the Op opens all the files. Setting
-// `preferred_shard` to match the value passed as the `shard` input
-// of a matching `Save` Op may speed up Restore. This attribute only affects
-// performance, not correctness. The default value -1 means files are processed in
-// order.
+// It uses "open addressing" with quadratic reprobing to resolve
+// collisions.
//
-// See also `RestoreSlice`.
+// This op creates a mutable hash table, specifying the type of its keys and
+// values. Each value must be a scalar. Data can be inserted into the table using
+// the insert operations. It does not support the initialization operation.
//
// Arguments:
-// file_pattern: Must have a single element. The pattern of the files from
-// which we read the tensor.
-// tensor_name: Must have a single element. The name of the tensor to be
-// restored.
-// dt: The type of the tensor to be restored.
+// empty_key: The key used to represent empty key buckets internally. Must not
+// be used in insert or lookup operations.
+// value_dtype: Type of the table values.
//
-// Returns The restored tensor.
-func Restore(scope *Scope, file_pattern tf.Output, tensor_name tf.Output, dt tf.DataType, optional ...RestoreAttr) (tensor tf.Output) {
+// Returns Handle to a table.
+func MutableDenseHashTableV2(scope *Scope, empty_key tf.Output, value_dtype tf.DataType, optional ...MutableDenseHashTableV2Attr) (table_handle tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dt": dt}
+ attrs := map[string]interface{}{"value_dtype": value_dtype}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "Restore",
+ Type: "MutableDenseHashTableV2",
Input: []tf.Input{
- file_pattern, tensor_name,
+ empty_key,
},
Attrs: attrs,
}
@@ -11600,279 +11775,248 @@ func Restore(scope *Scope, file_pattern tf.Output, tensor_name tf.Output, dt tf.
return op.Output(0)
}
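
A sketch of creating such a table, assuming the imports from the first sketch; the helper name, the -1 sentinel key, and the bucket sizing are illustrative (the empty key must simply never appear in insert or lookup operations):

func newInt64Table(s *op.Scope) tf.Output {
	// Sentinel key reserved for empty buckets; never used as a real key.
	emptyKey := op.Const(s.SubScope("empty_key"), int64(-1))
	return op.MutableDenseHashTableV2(s, emptyKey, tf.Float,
		op.MutableDenseHashTableV2InitialNumBuckets(1<<17), // power of 2
		op.MutableDenseHashTableV2MaxLoadFactor(0.8))
}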
-// WriteAudioSummaryAttr is an optional argument to WriteAudioSummary.
-type WriteAudioSummaryAttr func(optionalAttr)
-
-// WriteAudioSummaryMaxOutputs sets the optional max_outputs attribute to value.
-//
-// value: Max number of batch elements to generate audio for.
-// If not specified, defaults to 3
-//
-// REQUIRES: value >= 1
-func WriteAudioSummaryMaxOutputs(value int64) WriteAudioSummaryAttr {
- return func(m optionalAttr) {
- m["max_outputs"] = value
- }
-}
-
-// Writes a `Summary` protocol buffer with audio.
-//
-// The summary has up to `max_outputs` summary values containing audio. The
-// audio is built from `tensor` which must be 3-D with shape `[batch_size,
-// frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
-// assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.
-//
-// The `tag` argument is a scalar `Tensor` of type `string`. It is used to
-// build the `tag` of the summary values:
-//
-// * If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
-// * If `max_outputs` is greater than 1, the summary value tags are
-// generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
+// Produces the max pool of the input tensor for quantized types.
//
// Arguments:
-// writer: A handle to a summary writer.
-// step: The step to write the summary for.
-// tag: Scalar. Used to build the `tag` attribute of the summary values.
-// tensor: 2-D of shape `[batch_size, frames]`.
-// sample_rate: The sample rate of the signal in hertz.
+// input: The 4D (batch x rows x cols x depth) Tensor to MaxReduce over.
+// min_input: The float value that the lowest quantized input value represents.
+// max_input: The float value that the highest quantized input value represents.
+// ksize: The size of the window for each dimension of the input tensor.
+// The length must be 4 to match the number of dimensions of the input.
+// strides: The stride of the sliding window for each dimension of the input
+// tensor. The length must be 4 to match the number of dimensions of the input.
+// padding: The type of padding algorithm to use.
//
-// Returns the created operation.
-func WriteAudioSummary(scope *Scope, writer tf.Output, step tf.Output, tag tf.Output, tensor tf.Output, sample_rate tf.Output, optional ...WriteAudioSummaryAttr) (o *tf.Operation) {
+// Returns:
+// * The float value that the lowest quantized output value represents.
+// * The float value that the highest quantized output value represents.
+func QuantizedMaxPool(scope *Scope, input tf.Output, min_input tf.Output, max_input tf.Output, ksize []int64, strides []int64, padding string) (output tf.Output, min_output tf.Output, max_output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
+ attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
opspec := tf.OpSpec{
- Type: "WriteAudioSummary",
+ Type: "QuantizedMaxPool",
Input: []tf.Input{
- writer, step, tag, tensor, sample_rate,
+ input, min_input, max_input,
},
Attrs: attrs,
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0), op.Output(1), op.Output(2)
}
-// FusedResizeAndPadConv2DAttr is an optional argument to FusedResizeAndPadConv2D.
-type FusedResizeAndPadConv2DAttr func(optionalAttr)
+// Computes softplus: `log(exp(features) + 1)`.
+func Softplus(scope *Scope, features tf.Output) (activations tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "Softplus",
+ Input: []tf.Input{
+ features,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
-// FusedResizeAndPadConv2DResizeAlignCorners sets the optional resize_align_corners attribute to value.
+// Computes exponential of x - 1 element-wise.
//
-// value: If true, rescale input by (new_height - 1) / (height - 1),
-// which exactly aligns the 4 corners of images and resized images. If false, rescale
-// by new_height / height. Treat similarly the width dimension.
-// If not specified, defaults to false
-func FusedResizeAndPadConv2DResizeAlignCorners(value bool) FusedResizeAndPadConv2DAttr {
- return func(m optionalAttr) {
- m["resize_align_corners"] = value
+// I.e., \\(y = (\exp x) - 1\\).
+func Expm1(scope *Scope, x tf.Output) (y tf.Output) {
+ if scope.Err() != nil {
+ return
}
+ opspec := tf.OpSpec{
+ Type: "Expm1",
+ Input: []tf.Input{
+ x,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
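
For simple element-wise wrappers like Expm1, a complete build-and-run round trip looks like this; a sketch assuming the standard tf/op bindings, with the printed values approximate:

package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	x := op.Const(s, []float32{0, 1, 2})
	y := op.Expm1(s, x) // [exp(0)-1, exp(1)-1, exp(2)-1]
	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()
	out, err := sess.Run(nil, []tf.Output{y}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].Value()) // approximately [0 1.7182817 6.389056]
}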
-// Performs a resize and padding as a preprocess during a convolution.
+// Returns the number of records this Reader has produced.
//
-// It's often possible to do spatial transformations more efficiently as part of
-// the packing stage of a convolution, so this op allows for an optimized
-// implementation where these stages are fused together. This prevents the need to
-// write out the intermediate results as whole tensors, reducing memory pressure,
-// and we can get some latency gains by merging the transformation calculations.
-// The data_format attribute for Conv2D isn't supported by this op, and defaults to
-// 'NHWC' order.
-// Internally this op uses a single per-graph scratch buffer, which means that it
-// will block if multiple versions are being run in parallel. This is because this
-// operator is primarily an optimization to minimize memory usage.
+// This is the same as the number of ReaderRead executions that have
+// succeeded.
//
// Arguments:
-// input: 4-D with shape `[batch, in_height, in_width, in_channels]`.
-// size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The
-// new size for the images.
-// paddings: A two-column matrix specifying the padding sizes. The number of
-// rows must be the same as the rank of `input`.
-// filter: 4-D with shape
-// `[filter_height, filter_width, in_channels, out_channels]`.
-//
-// strides: 1-D of length 4. The stride of the sliding window for each dimension
-// of `input`. Must be in the same order as the dimension specified with format.
-// padding: The type of padding algorithm to use.
-func FusedResizeAndPadConv2D(scope *Scope, input tf.Output, size tf.Output, paddings tf.Output, filter tf.Output, mode string, strides []int64, padding string, optional ...FusedResizeAndPadConv2DAttr) (output tf.Output) {
+// reader_handle: Handle to a Reader.
+func ReaderNumRecordsProducedV2(scope *Scope, reader_handle tf.Output) (records_produced tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"mode": mode, "strides": strides, "padding": padding}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "FusedResizeAndPadConv2D",
+ Type: "ReaderNumRecordsProducedV2",
Input: []tf.Input{
- input, size, paddings, filter,
+ reader_handle,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// DenseToSparseSetOperationAttr is an optional argument to DenseToSparseSetOperation.
-type DenseToSparseSetOperationAttr func(optionalAttr)
-
-// DenseToSparseSetOperationValidateIndices sets the optional validate_indices attribute to value.
-// If not specified, defaults to true
-func DenseToSparseSetOperationValidateIndices(value bool) DenseToSparseSetOperationAttr {
- return func(m optionalAttr) {
- m["validate_indices"] = value
- }
-}
-
-// Applies set operation along last dimension of `Tensor` and `SparseTensor`.
+// Computes the sum along segments of a tensor.
//
-// See SetOperationOp::SetOperationFromContext for values of `set_operation`.
+// Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
+// segments.
//
-// Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`,
-// and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same
-// as `set1`. Dimension `n` contains values in a set, duplicates are allowed but
-// ignored.
+// Computes a tensor such that
+// \\(output_i = \sum_j data_j\\) where sum is over `j` such
+// that `segment_ids[j] == i`.
//
-// If `validate_indices` is `True`, this op validates the order and range of `set2`
-// indices.
+// If the sum is empty for a given segment ID `i`, `output[i] = 0`.
//
-// Output `result` is a `SparseTensor` represented by `result_indices`,
-// `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
-// has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
-// dimension contains the result of `set_operation` applied to the corresponding
-// `[0...n-1]` dimension of `set`.
+// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentSum.png" alt>
+// </div>
//
// Arguments:
-// set1: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.
-// Dimension `n` contains values in a set, duplicates are allowed but ignored.
-// set2_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
-// order.
-// set2_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major
-// order.
-// set2_shape: 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must
-// be the same as the 1st `n-1` dimensions of `set1`, `result_shape[n]` is the
-// max set size across `n-1` dimensions.
//
+// segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s
+// first dimension. Values should be sorted and can be repeated.
//
-// Returns 2D indices of a `SparseTensor`.1D values of a `SparseTensor`.1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is
-// the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`
-// is the max result set size across all `0...n-1` dimensions.
-func DenseToSparseSetOperation(scope *Scope, set1 tf.Output, set2_indices tf.Output, set2_values tf.Output, set2_shape tf.Output, set_operation string, optional ...DenseToSparseSetOperationAttr) (result_indices tf.Output, result_values tf.Output, result_shape tf.Output) {
+// Returns Has same shape as data, except for dimension 0 which
+// has size `k`, the number of segments.
+func SegmentSum(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"set_operation": set_operation}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "DenseToSparseSetOperation",
+ Type: "SegmentSum",
Input: []tf.Input{
- set1, set2_indices, set2_values, set2_shape,
+ data, segment_ids,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
+ return op.Output(0)
}
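
A small concrete instance of the segmented sum described above, assuming the imports from the earlier sketches; the helper name and values are hypothetical:

func segmentSumExample(s *op.Scope) tf.Output {
	data := op.Const(s.SubScope("data"), []int32{5, 1, 7, 2, 3, 4})
	// Three segments: ids must be sorted, and repeats group elements.
	ids := op.Const(s.SubScope("ids"), []int32{0, 0, 0, 1, 2, 2})
	return op.SegmentSum(s, data, ids) // => [13 2 7]
}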
-// Delete the tensor specified by its handle in the session.
+// Creates a dataset that emits the lines of one or more text files.
//
// Arguments:
-// handle: The handle for a tensor stored in the session state.
-//
-// Returns the created operation.
-func DeleteSessionTensor(scope *Scope, handle tf.Output) (o *tf.Operation) {
+// filenames: A scalar or a vector containing the name(s) of the file(s) to be
+// read.
+// compression_type: A scalar containing either (i) the empty string (no
+// compression), (ii) "ZLIB", or (iii) "GZIP".
+// buffer_size: A scalar containing the number of bytes to buffer.
+func TextLineDataset(scope *Scope, filenames tf.Output, compression_type tf.Output, buffer_size tf.Output) (handle tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "DeleteSessionTensor",
+ Type: "TextLineDataset",
Input: []tf.Input{
- handle,
+ filenames, compression_type, buffer_size,
},
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
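
A construction sketch, assuming the imports from the earlier sketches; the file paths and buffer size are hypothetical, and the returned handle is normally consumed by the dataset iterator ops elsewhere in this file:

func textLines(s *op.Scope) tf.Output {
	filenames := op.Const(s.SubScope("filenames"),
		[]string{"/tmp/a.txt", "/tmp/b.txt"})
	compression := op.Const(s.SubScope("compression"), "") // no compression
	bufferSize := op.Const(s.SubScope("buffer"), int64(256<<10))
	return op.TextLineDataset(s, filenames, compression, bufferSize)
}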
-// DenseToDenseSetOperationAttr is an optional argument to DenseToDenseSetOperation.
-type DenseToDenseSetOperationAttr func(optionalAttr)
-
-// DenseToDenseSetOperationValidateIndices sets the optional validate_indices attribute to value.
-// If not specified, defaults to true
-func DenseToDenseSetOperationValidateIndices(value bool) DenseToDenseSetOperationAttr {
- return func(m optionalAttr) {
- m["validate_indices"] = value
+// Checks whether a resource handle-based variable has been initialized.
+//
+// Arguments:
+// resource: the input resource handle.
+//
+// Returns a scalar boolean which is true if the variable has been
+// initialized.
+func VarIsInitializedOp(scope *Scope, resource tf.Output) (is_initialized tf.Output) {
+ if scope.Err() != nil {
+ return
}
+ opspec := tf.OpSpec{
+ Type: "VarIsInitializedOp",
+ Input: []tf.Input{
+ resource,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// Applies set operation along last dimension of 2 `Tensor` inputs.
+// Pads a tensor with zeros.
//
-// See SetOperationOp::SetOperationFromContext for values of `set_operation`.
+// This operation pads `input` with zeros according to the `paddings` you
+// specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is the
+// rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
+// how many zeros to add before the contents of `input` in that dimension, and
+// `paddings[D, 1]` indicates how many zeros to add after the contents of `input`
+// in that dimension.
//
-// Output `result` is a `SparseTensor` represented by `result_indices`,
-// `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
-// has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
-// dimension contains the result of `set_operation` applied to the corresponding
-// `[0...n-1]` dimension of `set`.
+// The padded size of each dimension D of the output is:
//
-// Arguments:
-// set1: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.
-// Dimension `n` contains values in a set, duplicates are allowed but ignored.
-// set2: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set1`.
-// Dimension `n` contains values in a set, duplicates are allowed but ignored.
+// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
//
+// For example:
//
-// Returns 2D indices of a `SparseTensor`.1D values of a `SparseTensor`.1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is
-// the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`
-// is the max result set size across all `0...n-1` dimensions.
-func DenseToDenseSetOperation(scope *Scope, set1 tf.Output, set2 tf.Output, set_operation string, optional ...DenseToDenseSetOperationAttr) (result_indices tf.Output, result_values tf.Output, result_shape tf.Output) {
+// ```
+// # 't' is [[1, 1], [2, 2]]
+// # 'paddings' is [[1, 1], [2, 2]]
+// # rank of 't' is 2
+// pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
+// [0, 0, 1, 1, 0, 0]
+// [0, 0, 2, 2, 0, 0]
+// [0, 0, 0, 0, 0, 0]]
+// ```
+func Pad(scope *Scope, input tf.Output, paddings tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"set_operation": set_operation}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "DenseToDenseSetOperation",
+ Type: "Pad",
Input: []tf.Input{
- set1, set2,
+ input, paddings,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
+ return op.Output(0)
}
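
The worked example in the comment above translates directly into graph construction; a sketch assuming the imports from the earlier sketches, with a hypothetical helper name:

func padExample(s *op.Scope) tf.Output {
	t := op.Const(s.SubScope("t"), [][]int32{{1, 1}, {2, 2}})
	// One row of zeros before/after dim 0; two columns before/after dim 1.
	paddings := op.Const(s.SubScope("paddings"), [][]int32{{1, 1}, {2, 2}})
	return op.Pad(s, t, paddings) // the 4x6 result shown above
}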
-// SumAttr is an optional argument to Sum.
-type SumAttr func(optionalAttr)
+// SparseTensorDenseMatMulAttr is an optional argument to SparseTensorDenseMatMul.
+type SparseTensorDenseMatMulAttr func(optionalAttr)
-// SumKeepDims sets the optional keep_dims attribute to value.
+// SparseTensorDenseMatMulAdjointA sets the optional adjoint_a attribute to value.
//
-// value: If true, retain reduced dimensions with length 1.
+// value: Use the adjoint of A in the matrix multiply. If A is complex, this
+// is transpose(conj(A)). Otherwise it's transpose(A).
// If not specified, defaults to false
-func SumKeepDims(value bool) SumAttr {
+func SparseTensorDenseMatMulAdjointA(value bool) SparseTensorDenseMatMulAttr {
return func(m optionalAttr) {
- m["keep_dims"] = value
+ m["adjoint_a"] = value
}
}
-// Computes the sum of elements across dimensions of a tensor.
+// SparseTensorDenseMatMulAdjointB sets the optional adjoint_b attribute to value.
//
-// Reduces `input` along the dimensions given in `axis`. Unless
-// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
-// `axis`. If `keep_dims` is true, the reduced dimensions are
-// retained with length 1.
+// value: Use the adjoint of B in the matrix multiply. If B is complex, this
+// is transpose(conj(B)). Otherwise it's transpose(B).
+// If not specified, defaults to false
+func SparseTensorDenseMatMulAdjointB(value bool) SparseTensorDenseMatMulAttr {
+ return func(m optionalAttr) {
+ m["adjoint_b"] = value
+ }
+}
+
+// Multiply SparseTensor (of rank 2) "A" by dense matrix "B".
//
-// Arguments:
-// input: The tensor to reduce.
-// axis: The dimensions to reduce. Must be in the range
-// `[-rank(input), rank(input))`.
+// No validity checking is performed on the indices of A. However, the following
+// input format is recommended for optimal behavior:
//
-// Returns The reduced tensor.
-func Sum(scope *Scope, input tf.Output, axis tf.Output, optional ...SumAttr) (output tf.Output) {
+// if adjoint_a == false:
+// A should be sorted in lexicographically increasing order. Use SparseReorder
+// if you're not sure.
+// if adjoint_a == true:
+// A should be sorted in order of increasing dimension 1 (i.e., "column major"
+// order instead of "row major" order).
+//
+// Arguments:
+// a_indices: 2-D. The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix.
+// a_values: 1-D. The `values` of the `SparseTensor`, size `[nnz]` Vector.
+// a_shape: 1-D. The `shape` of the `SparseTensor`, size `[2]` Vector.
+// b: 2-D. A dense Matrix.
+func SparseTensorDenseMatMul(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b tf.Output, optional ...SparseTensorDenseMatMulAttr) (product tf.Output) {
if scope.Err() != nil {
return
}
@@ -11881,9 +12025,9 @@ func Sum(scope *Scope, input tf.Output, axis tf.Output, optional ...SumAttr) (ou
a(attrs)
}
opspec := tf.OpSpec{
- Type: "Sum",
+ Type: "SparseTensorDenseMatMul",
Input: []tf.Input{
- input, axis,
+ a_indices, a_values, a_shape, b,
},
Attrs: attrs,
}
@@ -11891,66 +12035,92 @@ func Sum(scope *Scope, input tf.Output, axis tf.Output, optional ...SumAttr) (ou
return op.Output(0)
}
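
A sketch with a tiny 2x3 sparse A (indices already in the recommended lexicographic order) times a dense 3x2 B, assuming the imports from the earlier sketches; the helper name and values are hypothetical:

func sparseDenseMatMul(s *op.Scope) tf.Output {
	// A = [[1 0 0], [0 0 2]] as a SparseTensor.
	aIndices := op.Const(s.SubScope("a_indices"), [][]int64{{0, 0}, {1, 2}})
	aValues := op.Const(s.SubScope("a_values"), []float32{1, 2})
	aShape := op.Const(s.SubScope("a_shape"), []int64{2, 3})
	b := op.Const(s.SubScope("b"), [][]float32{{1, 2}, {3, 4}, {5, 6}})
	return op.SparseTensorDenseMatMul(s, aIndices, aValues, aShape, b)
	// => [[1 2], [10 12]]
}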
-// Computes the sign and the log of the absolute value of the determinant of
+// Deserialize and concatenate `SparseTensors` from a serialized minibatch.
//
-// one or more square matrices.
+// The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where
+// `N` is the minibatch size and the rows correspond to packed outputs of
+// `SerializeSparse`. The ranks of the original `SparseTensor` objects
+// must all match. When the final `SparseTensor` is created, it has rank one
+// higher than the ranks of the incoming `SparseTensor` objects
+// (they have been concatenated along a new row dimension).
//
-// The input is a tensor of shape `[N, M, M]` whose inner-most 2 dimensions
-// form square matrices. The outputs are two tensors containing the signs and
-// absolute values of the log determinants for all N input submatrices
-// `[..., :, :]` such that the determinant = sign*exp(log_abs_determinant).
-// The log_abs_determinant is computed as det(P)*sum(log(diag(LU))) where LU
-// is the LU decomposition of the input and P is the corresponding
-// permutation matrix.
+// The output `SparseTensor` object's shape values for all dimensions but the
+// first are the max across the input `SparseTensor` objects' shape values
+// for the corresponding dimensions. Its first shape value is `N`, the minibatch
+// size.
//
-// Arguments:
-// input: Shape is `[N, M, M]`.
+// The input `SparseTensor` objects' indices are assumed ordered in
+// standard lexicographic order. If this is not the case, after this
+// step run `SparseReorder` to restore index ordering.
//
-// Returns The signs of the log determinants of the inputs. Shape is `[N]`.The logs of the absolute values of the determinants
-// of the N input matrices. Shape is `[N]`.
-func LogMatrixDeterminant(scope *Scope, input tf.Output) (sign tf.Output, log_abs_determinant tf.Output) {
+// For example, if the serialized input is a `[2 x 3]` matrix representing two
+// original `SparseTensor` objects:
+//
+// index = [ 0]
+// [10]
+// [20]
+// values = [1, 2, 3]
+// shape = [50]
+//
+// and
+//
+// index = [ 2]
+// [10]
+// values = [4, 5]
+// shape = [30]
+//
+// then the final deserialized `SparseTensor` will be:
+//
+// index = [0 0]
+// [0 10]
+// [0 20]
+// [1 2]
+// [1 10]
+// values = [1, 2, 3, 4, 5]
+// shape = [2 50]
+//
+// Arguments:
+// serialized_sparse: 2-D, The `N` serialized `SparseTensor` objects.
+// Must have 3 columns.
+// dtype: The `dtype` of the serialized `SparseTensor` objects.
+func DeserializeManySparse(scope *Scope, serialized_sparse tf.Output, dtype tf.DataType) (sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"dtype": dtype}
opspec := tf.OpSpec{
- Type: "LogMatrixDeterminant",
+ Type: "DeserializeManySparse",
Input: []tf.Input{
- input,
+ serialized_sparse,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1)
+ return op.Output(0), op.Output(1), op.Output(2)
}
-// SetSizeAttr is an optional argument to SetSize.
-type SetSizeAttr func(optionalAttr)
+// StringJoinAttr is an optional argument to StringJoin.
+type StringJoinAttr func(optionalAttr)
-// SetSizeValidateIndices sets the optional validate_indices attribute to value.
-// If not specified, defaults to true
-func SetSizeValidateIndices(value bool) SetSizeAttr {
+// StringJoinSeparator sets the optional separator attribute to value.
+//
+// value: string, an optional join separator.
+// If not specified, defaults to ""
+func StringJoinSeparator(value string) StringJoinAttr {
return func(m optionalAttr) {
- m["validate_indices"] = value
+ m["separator"] = value
}
}
-// Number of unique elements along last dimension of input `set`.
-//
-// Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`,
-// and `set_shape`. The last dimension contains values in a set, duplicates are
-// allowed but ignored.
+// Joins the strings in the given list of string tensors into one tensor;
//
-// If `validate_indices` is `True`, this op validates the order and range of `set`
-// indices.
+// with the given separator (default is an empty separator).
//
// Arguments:
-// set_indices: 2D `Tensor`, indices of a `SparseTensor`.
-// set_values: 1D `Tensor`, values of a `SparseTensor`.
-// set_shape: 1D `Tensor`, shape of a `SparseTensor`.
-//
-// Returns For `set` ranked `n`, this is a `Tensor` with rank `n-1`, and the same 1st
-// `n-1` dimensions as `set`. Each value is the number of unique elements in
-// the corresponding `[0...n-1]` dimension of `set`.
-func SetSize(scope *Scope, set_indices tf.Output, set_values tf.Output, set_shape tf.Output, optional ...SetSizeAttr) (size tf.Output) {
+// inputs: A list of string tensors. The tensors must all have the same shape,
+// or be scalars. Scalars may be mixed in; these will be broadcast to the shape
+// of non-scalar inputs.
+func StringJoin(scope *Scope, inputs []tf.Output, optional ...StringJoinAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
@@ -11959,9 +12129,9 @@ func SetSize(scope *Scope, set_indices tf.Output, set_values tf.Output, set_shap
a(attrs)
}
opspec := tf.OpSpec{
- Type: "SetSize",
+ Type: "StringJoin",
Input: []tf.Input{
- set_indices, set_values, set_shape,
+ tf.OutputList(inputs),
},
Attrs: attrs,
}
@@ -11969,191 +12139,202 @@ func SetSize(scope *Scope, set_indices tf.Output, set_values tf.Output, set_shap
return op.Output(0)
}
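
An element-wise join sketch using the separator option, assuming the imports from the earlier sketches; the helper name and strings are hypothetical:

func joinPaths(s *op.Scope) tf.Output {
	a := op.Const(s.SubScope("a"), []string{"usr", "var"})
	b := op.Const(s.SubScope("b"), []string{"local", "log"})
	// => ["usr/local" "var/log"]
	return op.StringJoin(s, []tf.Output{a, b}, op.StringJoinSeparator("/"))
}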
-// The gradient of SparseFillEmptyRows.
-//
-// Takes vectors reverse_index_map, shaped `[N]`, and grad_values,
-// shaped `[N_full]`, where `N_full >= N` and copies data into either
-// `d_values` or `d_default_value`. Here `d_values` is shaped `[N]` and
-// `d_default_value` is a scalar.
+// Returns immutable tensor from memory region.
//
-// d_values[j] = grad_values[reverse_index_map[j]]
-// d_default_value = sum_{k : 0 .. N_full - 1} (
-// grad_values[k] * 1{k not in reverse_index_map})
+// The current implementation memmaps the tensor from a file.
//
// Arguments:
-// reverse_index_map: 1-D. The reverse index map from SparseFillEmptyRows.
-// grad_values: 1-D. The gradients from backprop.
-//
-// Returns 1-D. The backprop into values.0-D. The backprop into default_value.
-func SparseFillEmptyRowsGrad(scope *Scope, reverse_index_map tf.Output, grad_values tf.Output) (d_values tf.Output, d_default_value tf.Output) {
+// dtype: Type of the returned tensor.
+// shape: Shape of the returned tensor.
+// memory_region_name: Name of readonly memory region used by the tensor, see
+// NewReadOnlyMemoryRegionFromFile in tensorflow::Env.
+func ImmutableConst(scope *Scope, dtype tf.DataType, shape tf.Shape, memory_region_name string) (tensor tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"dtype": dtype, "shape": shape, "memory_region_name": memory_region_name}
opspec := tf.OpSpec{
- Type: "SparseFillEmptyRowsGrad",
- Input: []tf.Input{
- reverse_index_map, grad_values,
- },
+ Type: "ImmutableConst",
+
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1)
+ return op.Output(0)
}
-// Assigns a new value to a variable.
+// Inverse real-valued fast Fourier transform.
//
-// Any ReadVariableOp with a control dependency on this op is guaranteed to return
-// this value or a subsequent newer value of the variable.
+// Computes the inverse 1-dimensional discrete Fourier transform of a real-valued
+// signal over the inner-most dimension of `input`.
+//
+// The inner-most dimension of `input` is assumed to be the result of `RFFT`: the
+// `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If
+// `fft_length` is not provided, it is computed from the size of the inner-most
+// dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to
+// compute `input` is odd, it should be provided since it cannot be inferred
+// properly.
+//
+// Along the axis `IRFFT` is computed on, if `fft_length / 2 + 1` is smaller
+// than the corresponding dimension of `input`, the dimension is cropped. If it is
+// larger, the dimension is padded with zeros.
//
// Arguments:
-// resource: handle to the resource in which to store the variable.
-// value: the value to set the new tensor to use.
+// input: A complex64 tensor.
+// fft_length: An int32 tensor of shape [1]. The FFT length.
//
-// Returns the created operation.
-func AssignVariableOp(scope *Scope, resource tf.Output, value tf.Output) (o *tf.Operation) {
+// Returns A float32 tensor of the same rank as `input`. The inner-most
+// dimension of `input` is replaced with the `fft_length` samples of its inverse
+// 1D Fourier transform.
+//
+// @compatibility(numpy)
+// Equivalent to np.fft.irfft
+// @end_compatibility
+func IRFFT(scope *Scope, input tf.Output, fft_length tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "AssignVariableOp",
+ Type: "IRFFT",
Input: []tf.Input{
- resource, value,
+ input, fft_length,
},
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// Says whether the targets are in the top `K` predictions.
+// Concatenates a list of `SparseTensor` along the specified dimension.
//
-// This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the
-// prediction for the target class is among the top `k` predictions among
-// all predictions for example `i`. Note that the behavior of `InTopK` differs
-// from the `TopK` op in its handling of ties; if multiple classes have the
-// same prediction value and straddle the top-`k` boundary, all of those
-// classes are considered to be in the top `k`.
+// Concatenation is with respect to the dense versions of these sparse tensors.
+// It is assumed that each input is a `SparseTensor` whose elements are ordered
+// along increasing dimension number.
//
-// More formally, let
+// All inputs' shapes must match, except for the concat dimension. The
+// `indices`, `values`, and `shapes` lists must have the same length.
//
-// \\(predictions_i\\) be the predictions for all classes for example `i`,
-// \\(targets_i\\) be the target class for example `i`,
-// \\(out_i\\) be the output for example `i`,
+// The output shape is identical to the inputs', except along the concat
+// dimension, where it is the sum of the inputs' sizes along that dimension.
//
-// $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
+// The output elements will be resorted to preserve the sort order along
+// increasing dimension number.
+//
+// This op runs in `O(M log M)` time, where `M` is the total number of non-empty
+// values across all inputs. This is due to the need for an internal sort in
+// order to concatenate efficiently across an arbitrary dimension.
+//
+// For example, if `concat_dim = 1` and the inputs are
+//
+// sp_inputs[0]: shape = [2, 3]
+// [0, 2]: "a"
+// [1, 0]: "b"
+// [1, 1]: "c"
+//
+// sp_inputs[1]: shape = [2, 4]
+// [0, 1]: "d"
+// [0, 2]: "e"
+//
+// then the output will be
+//
+// shape = [2, 7]
+// [0, 2]: "a"
+// [0, 4]: "d"
+// [0, 5]: "e"
+// [1, 0]: "b"
+// [1, 1]: "c"
+//
+// Graphically this is equivalent to doing
+//
+// [ a] concat [ d e ] = [ a d e ]
+// [b c ] [ ] [b c ]
//
// Arguments:
-// predictions: A `batch_size` x `classes` tensor.
-// targets: A `batch_size` vector of class ids.
-// k: Number of top elements to look at for computing precision.
+// indices: 2-D. Indices of each input `SparseTensor`.
+// values: 1-D. Non-empty values of each `SparseTensor`.
+// shapes: 1-D. Shapes of each `SparseTensor`.
+// concat_dim: Dimension to concatenate along. Must be in range [-rank, rank),
+// where rank is the number of dimensions in each input `SparseTensor`.
//
-// Returns Computed precision at `k` as a `bool Tensor`.
-func InTopKV2(scope *Scope, predictions tf.Output, targets tf.Output, k tf.Output) (precision tf.Output) {
+// Returns:
+// output_indices: 2-D. Indices of the concatenated `SparseTensor`.
+// output_values: 1-D. Non-empty values of the concatenated `SparseTensor`.
+// output_shape: 1-D. Shape of the concatenated `SparseTensor`.
+func SparseConcat(scope *Scope, indices []tf.Output, values []tf.Output, shapes []tf.Output, concat_dim int64) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"concat_dim": concat_dim}
opspec := tf.OpSpec{
- Type: "InTopKV2",
+ Type: "SparseConcat",
Input: []tf.Input{
- predictions, targets, k,
+ tf.OutputList(indices), tf.OutputList(values), tf.OutputList(shapes),
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1), op.Output(2)
}
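
A minimal sketch of the worked example above expressed through this wrapper, assuming the standard `tensorflow/go` session plumbing (error handling elided for brevity):

```
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// sp_inputs[0]: shape [2, 3] holding "a", "b", "c" as in the example above.
	ind0 := op.Const(s, [][]int64{{0, 2}, {1, 0}, {1, 1}})
	val0 := op.Const(s, []string{"a", "b", "c"})
	shp0 := op.Const(s, []int64{2, 3})
	// sp_inputs[1]: shape [2, 4] holding "d", "e".
	ind1 := op.Const(s, [][]int64{{0, 1}, {0, 2}})
	val1 := op.Const(s, []string{"d", "e"})
	shp1 := op.Const(s, []int64{2, 4})
	outInd, outVal, outShp := op.SparseConcat(s,
		[]tf.Output{ind0, ind1}, []tf.Output{val0, val1},
		[]tf.Output{shp0, shp1}, 1)

	graph, _ := s.Finalize() // error handling elided in this sketch
	sess, _ := tf.NewSession(graph, nil)
	defer sess.Close()
	res, _ := sess.Run(nil, []tf.Output{outInd, outVal, outShp}, nil)
	// shape = [2 7]; values in resorted order: a d e b c
	fmt.Println(res[0].Value(), res[1].Value(), res[2].Value())
}
```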
-// TakeManySparseFromTensorsMapAttr is an optional argument to TakeManySparseFromTensorsMap.
-type TakeManySparseFromTensorsMapAttr func(optionalAttr)
-
-// TakeManySparseFromTensorsMapContainer sets the optional container attribute to value.
+// Generates sparse cross from a list of sparse and dense tensors.
//
-// value: The container name for the `SparseTensorsMap` read by this op.
-// If not specified, defaults to ""
-func TakeManySparseFromTensorsMapContainer(value string) TakeManySparseFromTensorsMapAttr {
- return func(m optionalAttr) {
- m["container"] = value
- }
-}
-
-// TakeManySparseFromTensorsMapSharedName sets the optional shared_name attribute to value.
+// The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each
+// representing features of one feature column. It outputs a 2D `SparseTensor` with
+// the batchwise crosses of these features.
//
-// value: The shared name for the `SparseTensorsMap` read by this op.
-// It should not be blank; rather the `shared_name` or unique Operation name
-// of the Op that created the original `SparseTensorsMap` should be used.
-// If not specified, defaults to ""
-func TakeManySparseFromTensorsMapSharedName(value string) TakeManySparseFromTensorsMapAttr {
- return func(m optionalAttr) {
- m["shared_name"] = value
- }
-}
-
-// Read `SparseTensors` from a `SparseTensorsMap` and concatenate them.
+// For example, if the inputs are
//
-// The input `sparse_handles` must be an `int64` matrix of shape `[N, 1]` where
-// `N` is the minibatch size and the rows correspond to the output handles of
-// `AddSparseToTensorsMap` or `AddManySparseToTensorsMap`. The ranks of the
-// original `SparseTensor` objects that went into the given input ops must all
-// match. When the final `SparseTensor` is created, it has rank one
-// higher than the ranks of the incoming `SparseTensor` objects
-// (they have been concatenated along a new row dimension on the left).
+// inputs[0]: SparseTensor with shape = [2, 2]
+// [0, 0]: "a"
+// [1, 0]: "b"
+// [1, 1]: "c"
//
-// The output `SparseTensor` object's shape values for all dimensions but the
-// first are the max across the input `SparseTensor` objects' shape values
-// for the corresponding dimensions. Its first shape value is `N`, the minibatch
-// size.
+// inputs[1]: SparseTensor with shape = [2, 1]
+// [0, 0]: "d"
+// [1, 0]: "e"
//
-// The input `SparseTensor` objects' indices are assumed ordered in
-// standard lexicographic order. If this is not the case, after this
-// step run `SparseReorder` to restore index ordering.
+// inputs[2]: Tensor [["f"], ["g"]]
//
-// For example, if the handles represent an input, which is a `[2, 3]` matrix
-// representing two original `SparseTensor` objects:
+// then the output will be
//
-// ```
-// index = [ 0]
-// [10]
-// [20]
-// values = [1, 2, 3]
-// shape = [50]
-// ```
+// shape = [2, 2]
+// [0, 0]: "a_X_d_X_f"
+// [1, 0]: "b_X_e_X_g"
+// [1, 1]: "c_X_e_X_g"
//
-// and
+// if hashed_output=true then the output will be
//
-// ```
-// index = [ 2]
-// [10]
-// values = [4, 5]
-// shape = [30]
-// ```
+// shape = [2, 2]
+// [0, 0]: FingerprintCat64(
+// Fingerprint64("f"), FingerprintCat64(
+// Fingerprint64("d"), Fingerprint64("a")))
+// [1, 0]: FingerprintCat64(
+// Fingerprint64("g"), FingerprintCat64(
+// Fingerprint64("e"), Fingerprint64("b")))
+// [1, 1]: FingerprintCat64(
+// Fingerprint64("g"), FingerprintCat64(
+// Fingerprint64("e"), Fingerprint64("c")))
//
-// then the final `SparseTensor` will be:
+// Arguments:
+// indices: 2-D. Indices of each input `SparseTensor`.
+// values: 1-D. Values of each `SparseTensor`.
+// shapes: 1-D. Shapes of each `SparseTensor`.
+// dense_inputs: 2-D. Columns represented by dense `Tensor`.
+// hashed_output: If true, returns the hash of the cross instead of the string.
+// This avoids any string manipulation.
+// num_buckets: Used only if hashed_output is true.
+// output = hashed_value % num_buckets if num_buckets > 0, else hashed_value.
+// hash_key: The hash_key that will be used by the `FingerprintCat64`
+// function to combine the fingerprints of the crosses.
//
-// ```
-// index = [0 0]
-// [0 10]
-// [0 20]
-// [1 2]
-// [1 10]
-// values = [1, 2, 3, 4, 5]
-// shape = [2 50]
-// ```
//
-// Arguments:
-// sparse_handles: 1-D, The `N` serialized `SparseTensor` objects.
-// Shape: `[N]`.
-// dtype: The `dtype` of the `SparseTensor` objects stored in the
-// `SparseTensorsMap`.
//
-// Returns 2-D. The `indices` of the minibatch `SparseTensor`.1-D. The `values` of the minibatch `SparseTensor`.1-D. The `shape` of the minibatch `SparseTensor`.
-func TakeManySparseFromTensorsMap(scope *Scope, sparse_handles tf.Output, dtype tf.DataType, optional ...TakeManySparseFromTensorsMapAttr) (sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output) {
+// Returns:
+// output_indices: 2-D. Indices of the concatenated `SparseTensor`.
+// output_values: 1-D. Non-empty values of the concatenated or hashed `SparseTensor`.
+// output_shape: 1-D. Shape of the concatenated `SparseTensor`.
+func SparseCross(scope *Scope, indices []tf.Output, values []tf.Output, shapes []tf.Output, dense_inputs []tf.Output, hashed_output bool, num_buckets int64, hash_key int64, out_type tf.DataType, internal_type tf.DataType) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dtype": dtype}
- for _, a := range optional {
- a(attrs)
- }
+ attrs := map[string]interface{}{"hashed_output": hashed_output, "num_buckets": num_buckets, "hash_key": hash_key, "out_type": out_type, "internal_type": internal_type}
opspec := tf.OpSpec{
- Type: "TakeManySparseFromTensorsMap",
+ Type: "SparseCross",
Input: []tf.Input{
- sparse_handles,
+ tf.OutputList(indices), tf.OutputList(values), tf.OutputList(shapes), tf.OutputList(dense_inputs),
},
Attrs: attrs,
}
@@ -12161,55 +12342,47 @@ func TakeManySparseFromTensorsMap(scope *Scope, sparse_handles tf.Output, dtype
return op.Output(0), op.Output(1), op.Output(2)
}
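
A sketch of the string example above through this wrapper. Passing `tf.String` for both `out_type` and `internal_type` is an assumption that fits string inputs with `hashed_output=false`; error handling is elided.

```
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// inputs[0]: SparseTensor with shape [2, 2] holding "a", "b", "c".
	ind0 := op.Const(s, [][]int64{{0, 0}, {1, 0}, {1, 1}})
	val0 := op.Const(s, []string{"a", "b", "c"})
	shp0 := op.Const(s, []int64{2, 2})
	// inputs[1]: SparseTensor with shape [2, 1] holding "d", "e".
	ind1 := op.Const(s, [][]int64{{0, 0}, {1, 0}})
	val1 := op.Const(s, []string{"d", "e"})
	shp1 := op.Const(s, []int64{2, 1})
	// inputs[2]: dense Tensor [["f"], ["g"]].
	dense := op.Const(s, [][]string{{"f"}, {"g"}})

	// hashed_output=false, so num_buckets and hash_key are inert here.
	outInd, outVal, outShp := op.SparseCross(s,
		[]tf.Output{ind0, ind1}, []tf.Output{val0, val1}, []tf.Output{shp0, shp1},
		[]tf.Output{dense}, false, 0, 0, tf.String, tf.String)

	graph, _ := s.Finalize() // error handling elided in this sketch
	sess, _ := tf.NewSession(graph, nil)
	defer sess.Close()
	res, _ := sess.Run(nil, []tf.Output{outInd, outVal, outShp}, nil)
	fmt.Println(res[1].Value()) // [a_X_d_X_f b_X_e_X_g c_X_e_X_g]
}
```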
-// AddSparseToTensorsMapAttr is an optional argument to AddSparseToTensorsMap.
-type AddSparseToTensorsMapAttr func(optionalAttr)
+// ListDiffAttr is an optional argument to ListDiff.
+type ListDiffAttr func(optionalAttr)
-// AddSparseToTensorsMapContainer sets the optional container attribute to value.
-//
-// value: The container name for the `SparseTensorsMap` created by this op.
-// If not specified, defaults to ""
-func AddSparseToTensorsMapContainer(value string) AddSparseToTensorsMapAttr {
+// ListDiffOutIdx sets the optional out_idx attribute to value.
+// If not specified, defaults to DT_INT32
+func ListDiffOutIdx(value tf.DataType) ListDiffAttr {
return func(m optionalAttr) {
- m["container"] = value
+ m["out_idx"] = value
}
}
-// AddSparseToTensorsMapSharedName sets the optional shared_name attribute to value.
+// Computes the difference between two lists of numbers or strings.
//
-// value: The shared name for the `SparseTensorsMap` created by this op.
-// If blank, the new Operation's unique name is used.
-// If not specified, defaults to ""
-func AddSparseToTensorsMapSharedName(value string) AddSparseToTensorsMapAttr {
- return func(m optionalAttr) {
- m["shared_name"] = value
- }
-}
-
-// Add a `SparseTensor` to a `SparseTensorsMap` return its handle.
+// Given a list `x` and a list `y`, this operation returns a list `out` that
+// represents all values that are in `x` but not in `y`. The returned list `out`
+// is sorted in the same order that the numbers appear in `x` (duplicates are
+// preserved). This operation also returns a list `idx` that represents the
+// position of each `out` element in `x`. In other words:
//
-// A `SparseTensor` is represented by three tensors: `sparse_indices`,
-// `sparse_values`, and `sparse_shape`.
+// `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]`
//
-// This operator takes the given `SparseTensor` and adds it to a container
-// object (a `SparseTensorsMap`). A unique key within this container is generated
-// in the form of an `int64`, and this is the value that is returned.
+// For example, given this input:
//
-// The `SparseTensor` can then be read out as part of a minibatch by passing
-// the key as a vector element to `TakeManySparseFromTensorsMap`. To ensure
-// the correct `SparseTensorsMap` is accessed, ensure that the same
-// `container` and `shared_name` are passed to that Op. If no `shared_name`
-// is provided here, instead use the *name* of the Operation created by calling
-// `AddSparseToTensorsMap` as the `shared_name` passed to
-// `TakeManySparseFromTensorsMap`. Ensure the Operations are colocated.
+// ```
+// x = [1, 2, 3, 4, 5, 6]
+// y = [1, 3, 5]
+// ```
+//
+// This operation would return:
+//
+// ```
+// out ==> [2, 4, 6]
+// idx ==> [1, 3, 5]
+// ```
//
// Arguments:
-// sparse_indices: 2-D. The `indices` of the `SparseTensor`.
-// sparse_values: 1-D. The `values` of the `SparseTensor`.
-// sparse_shape: 1-D. The `shape` of the `SparseTensor`.
+// x: 1-D. Values to keep.
+// y: 1-D. Values to remove.
//
-// Returns 0-D. The handle of the `SparseTensor` now stored in the
-// `SparseTensorsMap`.
-func AddSparseToTensorsMap(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output, optional ...AddSparseToTensorsMapAttr) (sparse_handle tf.Output) {
+// Returns:
+// out: 1-D. Values present in `x` but not in `y`.
+// idx: 1-D. Positions of `x` values preserved in `out`.
+func ListDiff(scope *Scope, x tf.Output, y tf.Output, optional ...ListDiffAttr) (out tf.Output, idx tf.Output) {
if scope.Err() != nil {
return
}
@@ -12218,214 +12391,241 @@ func AddSparseToTensorsMap(scope *Scope, sparse_indices tf.Output, sparse_values
a(attrs)
}
opspec := tf.OpSpec{
- Type: "AddSparseToTensorsMap",
+ Type: "ListDiff",
Input: []tf.Input{
- sparse_indices, sparse_values, sparse_shape,
+ x, y,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1)
}
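
The doc example translates directly; a minimal sketch, assuming the usual session plumbing with error handling elided:

```
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	x := op.Const(s, []int32{1, 2, 3, 4, 5, 6})
	y := op.Const(s, []int32{1, 3, 5})
	out, idx := op.ListDiff(s, x, y)

	graph, _ := s.Finalize() // error handling elided in this sketch
	sess, _ := tf.NewSession(graph, nil)
	defer sess.Close()
	res, _ := sess.Run(nil, []tf.Output{out, idx}, nil)
	fmt.Println(res[0].Value(), res[1].Value()) // [2 4 6] [1 3 5]
}
```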
-// FusedBatchNormGradV2Attr is an optional argument to FusedBatchNormGradV2.
-type FusedBatchNormGradV2Attr func(optionalAttr)
-
-// FusedBatchNormGradV2Epsilon sets the optional epsilon attribute to value.
+// Adds up a `SparseTensor` and a dense `Tensor`, producing a dense `Tensor`.
//
-// value: A small float number added to the variance of x.
-// If not specified, defaults to 0.0001
-func FusedBatchNormGradV2Epsilon(value float32) FusedBatchNormGradV2Attr {
- return func(m optionalAttr) {
- m["epsilon"] = value
- }
-}
-
-// FusedBatchNormGradV2DataFormat sets the optional data_format attribute to value.
+// This Op does not require `a_indices` be sorted in standard lexicographic order.
//
-// value: The data format for y_backprop, x, x_backprop.
-// Either "NHWC" (default) or "NCHW".
-// If not specified, defaults to "NHWC"
-func FusedBatchNormGradV2DataFormat(value string) FusedBatchNormGradV2Attr {
- return func(m optionalAttr) {
- m["data_format"] = value
+// Arguments:
+// a_indices: 2-D. The `indices` of the `SparseTensor`, with shape `[nnz, ndims]`.
+// a_values: 1-D. The `values` of the `SparseTensor`, with shape `[nnz]`.
+// a_shape: 1-D. The `shape` of the `SparseTensor`, with shape `[ndims]`.
+// b: `ndims`-D Tensor. With shape `a_shape`.
+func SparseTensorDenseAdd(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b tf.Output) (output tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "SparseTensorDenseAdd",
+ Input: []tf.Input{
+ a_indices, a_values, a_shape, b,
+ },
}
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
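
A minimal sketch with illustrative values, assuming the usual session plumbing (errors elided): the two sparse entries are added into the dense operand.

```
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// Sparse operand: 10 at [0, 0] and 20 at [1, 1] in a 2x2 shape.
	aInd := op.Const(s, [][]int64{{0, 0}, {1, 1}})
	aVal := op.Const(s, []float32{10, 20})
	aShp := op.Const(s, []int64{2, 2})
	b := op.Const(s, [][]float32{{1, 2}, {3, 4}})
	sum := op.SparseTensorDenseAdd(s, aInd, aVal, aShp, b)

	graph, _ := s.Finalize() // error handling elided in this sketch
	sess, _ := tf.NewSession(graph, nil)
	defer sess.Close()
	res, _ := sess.Run(nil, []tf.Output{sum}, nil)
	fmt.Println(res[0].Value()) // [[11 2] [3 24]]
}
```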
-// FusedBatchNormGradV2IsTraining sets the optional is_training attribute to value.
-//
-// value: A bool value to indicate the operation is for training (default)
-// or inference.
+// SparseToSparseSetOperationAttr is an optional argument to SparseToSparseSetOperation.
+type SparseToSparseSetOperationAttr func(optionalAttr)
+
+// SparseToSparseSetOperationValidateIndices sets the optional validate_indices attribute to value.
// If not specified, defaults to true
-func FusedBatchNormGradV2IsTraining(value bool) FusedBatchNormGradV2Attr {
+func SparseToSparseSetOperationValidateIndices(value bool) SparseToSparseSetOperationAttr {
return func(m optionalAttr) {
- m["is_training"] = value
+ m["validate_indices"] = value
}
}
-// Gradient for batch normalization.
+// Applies set operation along last dimension of 2 `SparseTensor` inputs.
//
-// Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW".
-// The size of 1D Tensors matches the dimension C of the 4D Tensors.
+// See SetOperationOp::SetOperationFromContext for values of `set_operation`.
+//
+// If `validate_indices` is `True`, `SparseToSparseSetOperation` validates the
+// order and range of `set1` and `set2` indices.
+//
+// Input `set1` is a `SparseTensor` represented by `set1_indices`, `set1_values`,
+// and `set1_shape`. For `set1` of rank `n`, the first `n-1` dimensions must be
+// the same as those of `set2`. Dimension `n` contains values in a set;
+// duplicates are allowed but ignored.
+//
+// Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`,
+// and `set2_shape`. For `set2` of rank `n`, the first `n-1` dimensions must be
+// the same as those of `set1`. Dimension `n` contains values in a set;
+// duplicates are allowed but ignored.
+//
+// Output `result` is a `SparseTensor` represented by `result_indices`,
+// `result_values`, and `result_shape`. For `set1` and `set2` of rank `n`, this
+// has rank `n` and the same first `n-1` dimensions as `set1` and `set2`. The
+// `n`th dimension contains the result of `set_operation` applied to the
+// corresponding `[0...n-1]` dimension of `set1` and `set2`.
//
// Arguments:
-// y_backprop: A 4D Tensor for the gradient with respect to y.
-// x: A 4D Tensor for input data.
-// scale: A 1D Tensor for scaling factor, to scale the normalized x.
-// reserve_space_1: When is_training is True, a 1D Tensor for the computed batch
-// mean to be reused in gradient computation. When is_training is
-// False, a 1D Tensor for the population mean to be reused in both
-// 1st and 2nd order gradient computation.
-// reserve_space_2: When is_training is True, a 1D Tensor for the computed batch
-// variance (inverted variance in the cuDNN case) to be reused in
-// gradient computation. When is_training is False, a 1D Tensor
-// for the population variance to be reused in both 1st and 2nd
-// order gradient computation.
+// set1_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
+// order.
+// set1_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major
+// order.
+// set1_shape: 1D `Tensor`, shape of a `SparseTensor`. `set1_shape[0...n-1]` must
+// be the same as `set2_shape[0...n-1]`, `set1_shape[n]` is the
+// max set size across `0...n-1` dimensions.
+// set2_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
+// order.
+// set2_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major
+// order.
+// set2_shape: 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must
+// be the same as `set1_shape[0...n-1]`, `set2_shape[n]` is the
+// max set size across `0...n-1` dimensions.
//
-// Returns A 4D Tensor for the gradient with respect to x.A 1D Tensor for the gradient with respect to scale.A 1D Tensor for the gradient with respect to offset.Unused placeholder to match the mean input in FusedBatchNorm.Unused placeholder to match the variance input
-// in FusedBatchNorm.
-func FusedBatchNormGradV2(scope *Scope, y_backprop tf.Output, x tf.Output, scale tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output, optional ...FusedBatchNormGradV2Attr) (x_backprop tf.Output, scale_backprop tf.Output, offset_backprop tf.Output, reserve_space_3 tf.Output, reserve_space_4 tf.Output) {
+// Returns:
+// result_indices: 2D indices of a `SparseTensor`.
+// result_values: 1D values of a `SparseTensor`.
+// result_shape: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is
+// the same as the first `n-1` dimensions of `set1` and `set2`; `result_shape[n]`
+// is the max result set size across all `0...n-1` dimensions.
+func SparseToSparseSetOperation(scope *Scope, set1_indices tf.Output, set1_values tf.Output, set1_shape tf.Output, set2_indices tf.Output, set2_values tf.Output, set2_shape tf.Output, set_operation string, optional ...SparseToSparseSetOperationAttr) (result_indices tf.Output, result_values tf.Output, result_shape tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"set_operation": set_operation}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "FusedBatchNormGradV2",
+ Type: "SparseToSparseSetOperation",
Input: []tf.Input{
- y_backprop, x, scale, reserve_space_1, reserve_space_2,
+ set1_indices, set1_values, set1_shape, set2_indices, set2_values, set2_shape,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
+ return op.Output(0), op.Output(1), op.Output(2)
}
-// Constructs a tensor by tiling a given tensor.
-//
-// This operation creates a new tensor by replicating `input` `multiples` times.
-// The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements,
-// and the values of `input` are replicated `multiples[i]` times along the 'i'th
-// dimension. For example, tiling `[a b c d]` by `[2]` produces
-// `[a b c d a b c d]`.
+// Computes numerical negative value element-wise.
//
-// Arguments:
-// input: 1-D or higher.
-// multiples: 1-D. Length must be the same as the number of dimensions in `input`
-func Tile(scope *Scope, input tf.Output, multiples tf.Output) (output tf.Output) {
+// I.e., \\(y = -x\\).
+func Neg(scope *Scope, x tf.Output) (y tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Tile",
+ Type: "Neg",
Input: []tf.Input{
- input, multiples,
+ x,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Returns the element-wise min of two SparseTensors.
+// Adds two `SparseTensor` objects to produce another `SparseTensor`.
//
-// Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
+// The input `SparseTensor` objects' indices are assumed ordered in standard
+// lexicographic order. If this is not the case, before this step run
+// `SparseReorder` to restore index ordering.
//
-// Arguments:
-// a_indices: 2-D. `N x R` matrix with the indices of non-empty values in a
-// SparseTensor, in the canonical lexicographic ordering.
-// a_values: 1-D. `N` non-empty values corresponding to `a_indices`.
-// a_shape: 1-D. Shape of the input SparseTensor.
-// b_indices: counterpart to `a_indices` for the other operand.
-// b_values: counterpart to `a_values` for the other operand; must be of the same dtype.
-// b_shape: counterpart to `a_shape` for the other operand; the two shapes must be equal.
+// By default, if two values sum to zero at some index, the output `SparseTensor`
+// would still include that particular location in its index, storing a zero in the
+// corresponding value slot. To override this, callers can specify `thresh`,
+// indicating that if the sum has a magnitude strictly smaller than `thresh`, its
+// corresponding value and index would then not be included. In particular,
+// `thresh == 0` (default) means everything is kept and actual thresholding happens
+// only for a positive value.
//
-// Returns 2-D. The indices of the output SparseTensor.1-D. The values of the output SparseTensor.
-func SparseSparseMinimum(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b_indices tf.Output, b_values tf.Output, b_shape tf.Output) (output_indices tf.Output, output_values tf.Output) {
+// In the following shapes, `nnz` is the count after taking `thresh` into account.
+//
+// Arguments:
+// a_indices: 2-D. The `indices` of the first `SparseTensor`, size `[nnz, ndims]` Matrix.
+// a_values: 1-D. The `values` of the first `SparseTensor`, size `[nnz]` Vector.
+// a_shape: 1-D. The `shape` of the first `SparseTensor`, size `[ndims]` Vector.
+// b_indices: 2-D. The `indices` of the second `SparseTensor`, size `[nnz, ndims]` Matrix.
+// b_values: 1-D. The `values` of the second `SparseTensor`, size `[nnz]` Vector.
+// b_shape: 1-D. The `shape` of the second `SparseTensor`, size `[ndims]` Vector.
+// thresh: 0-D. The magnitude threshold that determines if an output value/index
+// pair takes space.
+func SparseAdd(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b_indices tf.Output, b_values tf.Output, b_shape tf.Output, thresh tf.Output) (sum_indices tf.Output, sum_values tf.Output, sum_shape tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "SparseSparseMinimum",
+ Type: "SparseAdd",
Input: []tf.Input{
- a_indices, a_values, a_shape, b_indices, b_values, b_shape,
+ a_indices, a_values, a_shape, b_indices, b_values, b_shape, thresh,
},
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1)
+ return op.Output(0), op.Output(1), op.Output(2)
}
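
A sketch showing the `thresh` behavior with illustrative values (errors elided): the entries at `[0, 1]` sum to 0.1, whose magnitude is below the 0.2 threshold, so that location is dropped from the output.

```
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// a has 1 at [0, 0] and 0.4 at [0, 1]; b has -0.3 at [0, 1] and 2 at [1, 0].
	aInd := op.Const(s, [][]int64{{0, 0}, {0, 1}})
	aVal := op.Const(s, []float32{1, 0.4})
	aShp := op.Const(s, []int64{2, 2})
	bInd := op.Const(s, [][]int64{{0, 1}, {1, 0}})
	bVal := op.Const(s, []float32{-0.3, 2})
	bShp := op.Const(s, []int64{2, 2})
	thresh := op.Const(s, float32(0.2))
	sumInd, sumVal, _ := op.SparseAdd(s, aInd, aVal, aShp, bInd, bVal, bShp, thresh)

	graph, _ := s.Finalize() // error handling elided in this sketch
	sess, _ := tf.NewSession(graph, nil)
	defer sess.Close()
	res, _ := sess.Run(nil, []tf.Output{sumInd, sumVal}, nil)
	fmt.Println(res[0].Value(), res[1].Value()) // [[0 0] [1 0]] [1 2]
}
```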
-// AllCandidateSamplerAttr is an optional argument to AllCandidateSampler.
-type AllCandidateSamplerAttr func(optionalAttr)
+// OrderedMapPeekAttr is an optional argument to OrderedMapPeek.
+type OrderedMapPeekAttr func(optionalAttr)
-// AllCandidateSamplerSeed sets the optional seed attribute to value.
-//
-// value: If either seed or seed2 are set to be non-zero, the random number
-// generator is seeded by the given seed. Otherwise, it is seeded by a
-// random seed.
+// OrderedMapPeekCapacity sets the optional capacity attribute to value.
// If not specified, defaults to 0
-func AllCandidateSamplerSeed(value int64) AllCandidateSamplerAttr {
+//
+// REQUIRES: value >= 0
+func OrderedMapPeekCapacity(value int64) OrderedMapPeekAttr {
return func(m optionalAttr) {
- m["seed"] = value
+ m["capacity"] = value
}
}
-// AllCandidateSamplerSeed2 sets the optional seed2 attribute to value.
-//
-// value: An second seed to avoid seed collision.
+// OrderedMapPeekMemoryLimit sets the optional memory_limit attribute to value.
// If not specified, defaults to 0
-func AllCandidateSamplerSeed2(value int64) AllCandidateSamplerAttr {
+//
+// REQUIRES: value >= 0
+func OrderedMapPeekMemoryLimit(value int64) OrderedMapPeekAttr {
return func(m optionalAttr) {
- m["seed2"] = value
+ m["memory_limit"] = value
}
}
-// Generates labels for candidate sampling with a learned unigram distribution.
-//
-// See explanations of candidate sampling and the data formats at
-// go/candidate-sampling.
-//
-// For each batch, this op picks a single set of sampled candidate labels.
-//
-// The advantages of sampling candidates per-batch are simplicity and the
-// possibility of efficient dense matrix multiplication. The disadvantage is that
-// the sampled candidates must be chosen independently of the context and of the
-// true labels.
-//
-// Arguments:
-// true_classes: A batch_size * num_true matrix, in which each row contains the
-// IDs of the num_true target_classes in the corresponding original label.
-// num_true: Number of true labels per context.
-// num_sampled: Number of candidates to produce.
-// unique: If unique is true, we sample with rejection, so that all sampled
-// candidates in a batch are unique. This requires some approximation to
-// estimate the post-rejection sampling probabilities.
+// OrderedMapPeekContainer sets the optional container attribute to value.
+// If not specified, defaults to ""
+func OrderedMapPeekContainer(value string) OrderedMapPeekAttr {
+ return func(m optionalAttr) {
+ m["container"] = value
+ }
+}
+
+// OrderedMapPeekSharedName sets the optional shared_name attribute to value.
+// If not specified, defaults to ""
+func OrderedMapPeekSharedName(value string) OrderedMapPeekAttr {
+ return func(m optionalAttr) {
+ m["shared_name"] = value
+ }
+}
+
+// Op peeks at the values at the specified key. If the
//
-// Returns A vector of length num_sampled, in which each element is
-// the ID of a sampled candidate.A batch_size * num_true matrix, representing
-// the number of times each candidate is expected to occur in a batch
-// of sampled candidates. If unique=true, then this is a probability.A vector of length num_sampled, for each sampled
-// candidate representing the number of times the candidate is expected
-// to occur in a batch of sampled candidates. If unique=true, then this is a
-// probability.
-func AllCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, optional ...AllCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
+// underlying container does not contain this key, this op will block until it
+// does. This Op is optimized for performance.
+func OrderedMapPeek(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, optional ...OrderedMapPeekAttr) (values []tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique}
+ attrs := map[string]interface{}{"dtypes": dtypes}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "AllCandidateSampler",
+ Type: "OrderedMapPeek",
Input: []tf.Input{
- true_classes,
+ key, indices,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
+ if scope.Err() != nil {
+ return
+ }
+ var idx int
+ var err error
+ if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
+ scope.UpdateErr("OrderedMapPeek", err)
+ return
+ }
+ return values
}
// DecodeAndCropJpegAttr is an optional argument to DecodeAndCropJpeg.
@@ -12544,294 +12744,274 @@ func DecodeAndCropJpeg(scope *Scope, contents tf.Output, crop_window tf.Output,
return op.Output(0)
}
-// RandomPoissonV2Attr is an optional argument to RandomPoissonV2.
-type RandomPoissonV2Attr func(optionalAttr)
+// AllCandidateSamplerAttr is an optional argument to AllCandidateSampler.
+type AllCandidateSamplerAttr func(optionalAttr)
-// RandomPoissonV2Seed sets the optional seed attribute to value.
+// AllCandidateSamplerSeed sets the optional seed attribute to value.
//
-// value: If either `seed` or `seed2` are set to be non-zero, the random number
+// value: If either seed or seed2 is set to be non-zero, the random number
// generator is seeded by the given seed. Otherwise, it is seeded by a
// random seed.
// If not specified, defaults to 0
-func RandomPoissonV2Seed(value int64) RandomPoissonV2Attr {
+func AllCandidateSamplerSeed(value int64) AllCandidateSamplerAttr {
return func(m optionalAttr) {
m["seed"] = value
}
}
-// RandomPoissonV2Seed2 sets the optional seed2 attribute to value.
+// AllCandidateSamplerSeed2 sets the optional seed2 attribute to value.
//
-// value: A second seed to avoid seed collision.
+// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
-func RandomPoissonV2Seed2(value int64) RandomPoissonV2Attr {
+func AllCandidateSamplerSeed2(value int64) AllCandidateSamplerAttr {
return func(m optionalAttr) {
m["seed2"] = value
}
}
-// RandomPoissonV2Dtype sets the optional dtype attribute to value.
-// If not specified, defaults to DT_INT64
-func RandomPoissonV2Dtype(value tf.DataType) RandomPoissonV2Attr {
- return func(m optionalAttr) {
- m["dtype"] = value
- }
-}
-
-// Outputs random values from the Poisson distribution(s) described by rate.
+// Generates labels for candidate sampling with a learned unigram distribution.
//
-// This op uses two algorithms, depending on rate. If rate >= 10, then
-// the algorithm by Hormann is used to acquire samples via
-// transformation-rejection.
-// See http://www.sciencedirect.com/science/article/pii/0167668793909974.
+// See explanations of candidate sampling and the data formats at
+// go/candidate-sampling.
//
-// Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform
-// random variables.
-// See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer
-// Programming, Volume 2. Addison Wesley
+// For each batch, this op picks a single set of sampled candidate labels.
+//
+// The advantages of sampling candidates per-batch are simplicity and the
+// possibility of efficient dense matrix multiplication. The disadvantage is that
+// the sampled candidates must be chosen independently of the context and of the
+// true labels.
//
// Arguments:
-// shape: 1-D integer tensor. Shape of independent samples to draw from each
-// distribution described by the shape parameters given in rate.
-// rate: A tensor in which each scalar is a "rate" parameter describing the
-// associated poisson distribution.
+// true_classes: A batch_size * num_true matrix, in which each row contains the
+// IDs of the num_true target_classes in the corresponding original label.
+// num_true: Number of true labels per context.
+// num_sampled: Number of candidates to produce.
+// unique: If unique is true, we sample with rejection, so that all sampled
+// candidates in a batch are unique. This requires some approximation to
+// estimate the post-rejection sampling probabilities.
//
-// Returns A tensor with shape `shape + shape(rate)`. Each slice
-// `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for
-// `rate[i0, i1, ...iN]`.
-func RandomPoissonV2(scope *Scope, shape tf.Output, rate tf.Output, optional ...RandomPoissonV2Attr) (output tf.Output) {
+// Returns:
+// sampled_candidates: A vector of length num_sampled, in which each element is
+// the ID of a sampled candidate.
+// true_expected_count: A batch_size * num_true matrix, representing the number
+// of times each candidate is expected to occur in a batch of sampled
+// candidates. If unique=true, then this is a probability.
+// sampled_expected_count: A vector of length num_sampled, for each sampled
+// candidate representing the number of times the candidate is expected to
+// occur in a batch of sampled candidates. If unique=true, then this is a
+// probability.
+func AllCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, optional ...AllCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "RandomPoissonV2",
+ Type: "AllCandidateSampler",
Input: []tf.Input{
- shape, rate,
+ true_classes,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1), op.Output(2)
}
-// OrderedMapPeekAttr is an optional argument to OrderedMapPeek.
-type OrderedMapPeekAttr func(optionalAttr)
-
-// OrderedMapPeekCapacity sets the optional capacity attribute to value.
-// If not specified, defaults to 0
+// Returns the element-wise min of two SparseTensors.
//
-// REQUIRES: value >= 0
-func OrderedMapPeekCapacity(value int64) OrderedMapPeekAttr {
- return func(m optionalAttr) {
- m["capacity"] = value
- }
-}
-
-// OrderedMapPeekMemoryLimit sets the optional memory_limit attribute to value.
-// If not specified, defaults to 0
+// Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
//
-// REQUIRES: value >= 0
-func OrderedMapPeekMemoryLimit(value int64) OrderedMapPeekAttr {
- return func(m optionalAttr) {
- m["memory_limit"] = value
- }
-}
-
-// OrderedMapPeekContainer sets the optional container attribute to value.
-// If not specified, defaults to ""
-func OrderedMapPeekContainer(value string) OrderedMapPeekAttr {
- return func(m optionalAttr) {
- m["container"] = value
- }
-}
-
-// OrderedMapPeekSharedName sets the optional shared_name attribute to value.
-// If not specified, defaults to ""
-func OrderedMapPeekSharedName(value string) OrderedMapPeekAttr {
- return func(m optionalAttr) {
- m["shared_name"] = value
- }
-}
-
-// Op peeks at the values at the specified key. If the
+// Arguments:
+// a_indices: 2-D. `N x R` matrix with the indices of non-empty values in a
+// SparseTensor, in the canonical lexicographic ordering.
+// a_values: 1-D. `N` non-empty values corresponding to `a_indices`.
+// a_shape: 1-D. Shape of the input SparseTensor.
+// b_indices: counterpart to `a_indices` for the other operand.
+// b_values: counterpart to `a_values` for the other operand; must be of the same dtype.
+// b_shape: counterpart to `a_shape` for the other operand; the two shapes must be equal.
//
-// underlying container does not contain this key
-// this op will block until it does. This Op is optimized for
-// performance.
-func OrderedMapPeek(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, optional ...OrderedMapPeekAttr) (values []tf.Output) {
+// Returns 2-D. The indices of the output SparseTensor.1-D. The values of the output SparseTensor.
+func SparseSparseMinimum(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b_indices tf.Output, b_values tf.Output, b_shape tf.Output) (output_indices tf.Output, output_values tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dtypes": dtypes}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "OrderedMapPeek",
+ Type: "SparseSparseMinimum",
Input: []tf.Input{
- key, indices,
+ a_indices, a_values, a_shape, b_indices, b_values, b_shape,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
- if scope.Err() != nil {
- return
- }
- var idx int
- var err error
- if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
- scope.UpdateErr("OrderedMapPeek", err)
- return
- }
- return values
+ return op.Output(0), op.Output(1)
}
-// Adds two `SparseTensor` objects to produce another `SparseTensor`.
-//
-// The input `SparseTensor` objects' indices are assumed ordered in standard
-// lexicographic order. If this is not the case, before this step run
-// `SparseReorder` to restore index ordering.
-//
-// By default, if two values sum to zero at some index, the output `SparseTensor`
-// would still include that particular location in its index, storing a zero in the
-// corresponding value slot. To override this, callers can specify `thresh`,
-// indicating that if the sum has a magnitude strictly smaller than `thresh`, its
-// corresponding value and index would then not be included. In particular,
-// `thresh == 0` (default) means everything is kept and actual thresholding happens
-// only for a positive value.
+// Constructs a tensor by tiling a given tensor.
//
-// In the following shapes, `nnz` is the count after taking `thresh` into account.
+// This operation creates a new tensor by replicating `input` `multiples` times.
+// The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements,
+// and the values of `input` are replicated `multiples[i]` times along the 'i'th
+// dimension. For example, tiling `[a b c d]` by `[2]` produces
+// `[a b c d a b c d]`.
//
// Arguments:
-// a_indices: 2-D. The `indices` of the first `SparseTensor`, size `[nnz, ndims]` Matrix.
-// a_values: 1-D. The `values` of the first `SparseTensor`, size `[nnz]` Vector.
-// a_shape: 1-D. The `shape` of the first `SparseTensor`, size `[ndims]` Vector.
-// b_indices: 2-D. The `indices` of the second `SparseTensor`, size `[nnz, ndims]` Matrix.
-// b_values: 1-D. The `values` of the second `SparseTensor`, size `[nnz]` Vector.
-// b_shape: 1-D. The `shape` of the second `SparseTensor`, size `[ndims]` Vector.
-// thresh: 0-D. The magnitude threshold that determines if an output value/index
-// pair takes space.
-func SparseAdd(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b_indices tf.Output, b_values tf.Output, b_shape tf.Output, thresh tf.Output) (sum_indices tf.Output, sum_values tf.Output, sum_shape tf.Output) {
+// input: 1-D or higher.
+// multiples: 1-D. Length must be the same as the number of dimensions in `input`
+func Tile(scope *Scope, input tf.Output, multiples tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "SparseAdd",
+ Type: "Tile",
Input: []tf.Input{
- a_indices, a_values, a_shape, b_indices, b_values, b_shape, thresh,
+ input, multiples,
},
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
+ return op.Output(0)
}
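
The `[a b c d]` example above, as a minimal sketch (errors elided):

```
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	input := op.Const(s, []string{"a", "b", "c", "d"})
	multiples := op.Const(s, []int32{2})
	tiled := op.Tile(s, input, multiples)

	graph, _ := s.Finalize() // error handling elided in this sketch
	sess, _ := tf.NewSession(graph, nil)
	defer sess.Close()
	res, _ := sess.Run(nil, []tf.Output{tiled}, nil)
	fmt.Println(res[0].Value()) // [a b c d a b c d]
}
```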
-// Computes the gradient of the sigmoid of `x` wrt its input.
+// Saves the input tensors to disk.
//
-// Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and
-// `dy` is the corresponding input gradient.
-func SigmoidGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
+// The size of `tensor_names` must match the number of tensors in `data`. `data[i]`
+// is written to `filename` with name `tensor_names[i]`.
+//
+// See also `SaveSlices`.
+//
+// Arguments:
+// filename: Must have a single element. The name of the file to which we write
+// the tensor.
+// tensor_names: Shape `[N]`. The names of the tensors to be saved.
+// data: `N` tensors to save.
+//
+// Returns the created operation.
+func Save(scope *Scope, filename tf.Output, tensor_names tf.Output, data []tf.Output) (o *tf.Operation) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "SigmoidGrad",
+ Type: "Save",
Input: []tf.Input{
- y, dy,
+ filename, tensor_names, tf.OutputList(data),
},
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
-// Computes numerical negative value element-wise.
+// Returns element-wise remainder of division. When `x < 0` xor `y < 0` is
//
-// I.e., \\(y = -x\\).
-func Neg(scope *Scope, x tf.Output) (y tf.Output) {
+// true, this follows Python semantics in that the result here is consistent
+// with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`.
+//
+// *NOTE*: `FloorMod` supports broadcasting. More about broadcasting
+// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+func FloorMod(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Neg",
+ Type: "FloorMod",
Input: []tf.Input{
- x,
+ x, y,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
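
A small worked example of the flooring semantics (errors elided): for each pair, `floor(x / y) * y + mod(x, y) = x`, so the result takes the sign of `y`, as in Python.

```
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	x := op.Const(s, []int32{7, -7, 7, -7})
	y := op.Const(s, []int32{3, 3, -3, -3})
	z := op.FloorMod(s, x, y)

	graph, _ := s.Finalize() // error handling elided in this sketch
	sess, _ := tf.NewSession(graph, nil)
	defer sess.Close()
	res, _ := sess.Run(nil, []tf.Output{z}, nil)
	fmt.Println(res[0].Value()) // [1 2 -2 -1]
}
```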
-// SparseToSparseSetOperationAttr is an optional argument to SparseToSparseSetOperation.
-type SparseToSparseSetOperationAttr func(optionalAttr)
+// TakeManySparseFromTensorsMapAttr is an optional argument to TakeManySparseFromTensorsMap.
+type TakeManySparseFromTensorsMapAttr func(optionalAttr)
-// SparseToSparseSetOperationValidateIndices sets the optional validate_indices attribute to value.
-// If not specified, defaults to true
-func SparseToSparseSetOperationValidateIndices(value bool) SparseToSparseSetOperationAttr {
+// TakeManySparseFromTensorsMapContainer sets the optional container attribute to value.
+//
+// value: The container name for the `SparseTensorsMap` read by this op.
+// If not specified, defaults to ""
+func TakeManySparseFromTensorsMapContainer(value string) TakeManySparseFromTensorsMapAttr {
return func(m optionalAttr) {
- m["validate_indices"] = value
+ m["container"] = value
}
}
-// Applies set operation along last dimension of 2 `SparseTensor` inputs.
+// TakeManySparseFromTensorsMapSharedName sets the optional shared_name attribute to value.
//
-// See SetOperationOp::SetOperationFromContext for values of `set_operation`.
+// value: The shared name for the `SparseTensorsMap` read by this op.
+// It should not be blank; rather the `shared_name` or unique Operation name
+// of the Op that created the original `SparseTensorsMap` should be used.
+// If not specified, defaults to ""
+func TakeManySparseFromTensorsMapSharedName(value string) TakeManySparseFromTensorsMapAttr {
+ return func(m optionalAttr) {
+ m["shared_name"] = value
+ }
+}
+
+// Read `SparseTensors` from a `SparseTensorsMap` and concatenate them.
//
-// If `validate_indices` is `True`, `SparseToSparseSetOperation` validates the
-// order and range of `set1` and `set2` indices.
+// The input `sparse_handles` must be an `int64` matrix of shape `[N, 1]` where
+// `N` is the minibatch size and the rows correspond to the output handles of
+// `AddSparseToTensorsMap` or `AddManySparseToTensorsMap`. The ranks of the
+// original `SparseTensor` objects that went into the given input ops must all
+// match. When the final `SparseTensor` is created, it has rank one
+// higher than the ranks of the incoming `SparseTensor` objects
+// (they have been concatenated along a new row dimension on the left).
//
-// Input `set1` is a `SparseTensor` represented by `set1_indices`, `set1_values`,
-// and `set1_shape`. For `set1` ranked `n`, 1st `n-1` dimensions must be the same
-// as `set2`. Dimension `n` contains values in a set, duplicates are allowed but
-// ignored.
+// The output `SparseTensor` object's shape values for all dimensions but the
+// first are the max across the input `SparseTensor` objects' shape values
+// for the corresponding dimensions. Its first shape value is `N`, the minibatch
+// size.
//
-// Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`,
-// and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same
-// as `set1`. Dimension `n` contains values in a set, duplicates are allowed but
-// ignored.
+// The input `SparseTensor` objects' indices are assumed ordered in
+// standard lexicographic order. If this is not the case, after this
+// step run `SparseReorder` to restore index ordering.
//
-// If `validate_indices` is `True`, this op validates the order and range of `set1`
-// and `set2` indices.
+// For example, if the handles represent an input, which is a `[2, 3]` matrix
+// representing two original `SparseTensor` objects:
//
-// Output `result` is a `SparseTensor` represented by `result_indices`,
-// `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
-// has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
-// dimension contains the result of `set_operation` applied to the corresponding
-// `[0...n-1]` dimension of `set`.
+// ```
+// index = [ 0]
+// [10]
+// [20]
+// values = [1, 2, 3]
+// shape = [50]
+// ```
//
-// Arguments:
-// set1_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
-// order.
-// set1_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major
-// order.
-// set1_shape: 1D `Tensor`, shape of a `SparseTensor`. `set1_shape[0...n-1]` must
-// be the same as `set2_shape[0...n-1]`, `set1_shape[n]` is the
-// max set size across `0...n-1` dimensions.
-// set2_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
-// order.
-// set2_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major
-// order.
-// set2_shape: 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must
-// be the same as `set1_shape[0...n-1]`, `set2_shape[n]` is the
-// max set size across `0...n-1` dimensions.
+// and
//
+// ```
+// index = [ 2]
+// [10]
+// values = [4, 5]
+// shape = [30]
+// ```
//
-// Returns 2D indices of a `SparseTensor`.1D values of a `SparseTensor`.1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is
-// the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`
-// is the max result set size across all `0...n-1` dimensions.
-func SparseToSparseSetOperation(scope *Scope, set1_indices tf.Output, set1_values tf.Output, set1_shape tf.Output, set2_indices tf.Output, set2_values tf.Output, set2_shape tf.Output, set_operation string, optional ...SparseToSparseSetOperationAttr) (result_indices tf.Output, result_values tf.Output, result_shape tf.Output) {
+// then the final `SparseTensor` will be:
+//
+// ```
+// index = [0 0]
+// [0 10]
+// [0 20]
+// [1 2]
+// [1 10]
+// values = [1, 2, 3, 4, 5]
+// shape = [2 50]
+// ```
+//
+// Arguments:
+// sparse_handles: 1-D, The `N` serialized `SparseTensor` objects.
+// Shape: `[N]`.
+// dtype: The `dtype` of the `SparseTensor` objects stored in the
+// `SparseTensorsMap`.
+//
+// Returns:
+// sparse_indices: 2-D. The `indices` of the minibatch `SparseTensor`.
+// sparse_values: 1-D. The `values` of the minibatch `SparseTensor`.
+// sparse_shape: 1-D. The `shape` of the minibatch `SparseTensor`.
+func TakeManySparseFromTensorsMap(scope *Scope, sparse_handles tf.Output, dtype tf.DataType, optional ...TakeManySparseFromTensorsMapAttr) (sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"set_operation": set_operation}
+ attrs := map[string]interface{}{"dtype": dtype}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "SparseToSparseSetOperation",
+ Type: "TakeManySparseFromTensorsMap",
Input: []tf.Input{
- set1_indices, set1_values, set1_shape, set2_indices, set2_values, set2_shape,
+ sparse_handles,
},
Attrs: attrs,
}
@@ -12839,180 +13019,164 @@ func SparseToSparseSetOperation(scope *Scope, set1_indices tf.Output, set1_value
return op.Output(0), op.Output(1), op.Output(2)
}
-// Elementwise computes the bitwise OR of `x` and `y`.
+// Says whether the targets are in the top `K` predictions.
//
-// The result will have those bits set, that are set in `x`, `y` or both. The
-// computation is performed on the underlying representations of `x` and `y`.
-func BitwiseOr(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
+// This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the
+// prediction for the target class is among the top `k` predictions among
+// all predictions for example `i`. Note that the behavior of `InTopK` differs
+// from the `TopK` op in its handling of ties; if multiple classes have the
+// same prediction value and straddle the top-`k` boundary, all of those
+// classes are considered to be in the top `k`.
+//
+// More formally, let
+//
+// \\(predictions_i\\) be the predictions for all classes for example `i`,
+// \\(targets_i\\) be the target class for example `i`,
+// \\(out_i\\) be the output for example `i`,
+//
+// $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
+//
+// Arguments:
+// predictions: A `batch_size` x `classes` tensor.
+// targets: A `batch_size` vector of class ids.
+// k: Number of top elements to look at for computing precision.
+//
+// Returns Computed precision at `k` as a `bool Tensor`.
+func InTopKV2(scope *Scope, predictions tf.Output, targets tf.Output, k tf.Output) (precision tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "BitwiseOr",
+ Type: "InTopKV2",
Input: []tf.Input{
- x, y,
+ predictions, targets, k,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
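
A minimal sketch with two illustrative examples and `k = 1` (errors elided): example 0's target (class 1) is the top prediction, while example 1's target (class 2) is not.

```
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	predictions := op.Const(s, [][]float32{{0.1, 0.8, 0.1}, {0.9, 0.05, 0.05}})
	targets := op.Const(s, []int32{1, 2})
	k := op.Const(s, int32(1))
	precision := op.InTopKV2(s, predictions, targets, k)

	graph, _ := s.Finalize() // error handling elided in this sketch
	sess, _ := tf.NewSession(graph, nil)
	defer sess.Close()
	res, _ := sess.Run(nil, []tf.Output{precision}, nil)
	fmt.Println(res[0].Value()) // [true false]
}
```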
-// Adds up a `SparseTensor` and a dense `Tensor`, producing a dense `Tensor`.
+// Assigns a new value to a variable.
//
-// This Op does not require `a_indices` be sorted in standard lexicographic order.
+// Any ReadVariableOp with a control dependency on this op is guaranteed to return
+// this value or a subsequent newer value of the variable.
//
// Arguments:
-// a_indices: 2-D. The `indices` of the `SparseTensor`, with shape `[nnz, ndims]`.
-// a_values: 1-D. The `values` of the `SparseTensor`, with shape `[nnz]`.
-// a_shape: 1-D. The `shape` of the `SparseTensor`, with shape `[ndims]`.
-// b: `ndims`-D Tensor. With shape `a_shape`.
-func SparseTensorDenseAdd(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b tf.Output) (output tf.Output) {
+// resource: handle to the resource in which to store the variable.
+// value: the value to set the new tensor to use.
+//
+// Returns the created operation.
+func AssignVariableOp(scope *Scope, resource tf.Output, value tf.Output) (o *tf.Operation) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "SparseTensorDenseAdd",
+ Type: "AssignVariableOp",
Input: []tf.Input{
- a_indices, a_values, a_shape, b,
+ resource, value,
},
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// AvgPoolAttr is an optional argument to AvgPool.
-type AvgPoolAttr func(optionalAttr)
-
-// AvgPoolDataFormat sets the optional data_format attribute to value.
-//
-// value: Specify the data format of the input and output data. With the
-// default format "NHWC", the data is stored in the order of:
-// [batch, in_height, in_width, in_channels].
-// Alternatively, the format could be "NCHW", the data storage order of:
-// [batch, in_channels, in_height, in_width].
-// If not specified, defaults to "NHWC"
-func AvgPoolDataFormat(value string) AvgPoolAttr {
- return func(m optionalAttr) {
- m["data_format"] = value
- }
+ return scope.AddOperation(opspec)
}
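
A sketch of the assign-then-read pattern, assuming the `VarHandleOp` and `ReadVariableOp` wrappers generated in this same package (errors elided); ordering is made explicit here by running the assignment in its own `Session.Run` call rather than via a control dependency.

```
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	handle := op.VarHandleOp(s, tf.Float, tf.ScalarShape())
	assign := op.AssignVariableOp(s, handle, op.Const(s, float32(42)))
	read := op.ReadVariableOp(s, handle, tf.Float)

	graph, _ := s.Finalize() // error handling elided in this sketch
	sess, _ := tf.NewSession(graph, nil)
	defer sess.Close()
	// Run the assignment first; a second Run then observes the new value.
	sess.Run(nil, nil, []*tf.Operation{assign})
	res, _ := sess.Run(nil, []tf.Output{read}, nil)
	fmt.Println(res[0].Value()) // 42
}
```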
-// Performs average pooling on the input.
-//
-// Each entry in `output` is the mean of the corresponding size `ksize`
-// window in `value`.
+// Returns a tensor of ones with the same shape and type as x.
//
// Arguments:
-// value: 4-D with shape `[batch, height, width, channels]`.
-// ksize: The size of the sliding window for each dimension of `value`.
-// strides: The stride of the sliding window for each dimension of `value`.
-// padding: The type of padding algorithm to use.
+// x: a tensor of type T.
//
-// Returns The average pooled output tensor.
-func AvgPool(scope *Scope, value tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPoolAttr) (output tf.Output) {
+// Returns a tensor of the same shape and type as x but filled with ones.
+func OnesLike(scope *Scope, x tf.Output) (y tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "AvgPool",
+ Type: "OnesLike",
Input: []tf.Input{
- value,
+ x,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Slice a `SparseTensor` based on the `start` and `size`.
-//
-// For example, if the input is
+// The gradient of SparseFillEmptyRows.
//
-// input_tensor = shape = [2, 7]
-// [ a d e ]
-// [b c ]
+// Takes vectors reverse_index_map, shaped `[N]`, and grad_values,
+// shaped `[N_full]`, where `N_full >= N`, and copies data into either
+// `d_values` or `d_default_value`. Here `d_values` is shaped `[N]` and
+// `d_default_value` is a scalar.
//
-// Graphically the output tensors are:
+// d_values[j] = grad_values[reverse_index_map[j]]
+// d_default_value = sum_{k : 0 .. N_full - 1} (
+// grad_values[k] * 1{k not in reverse_index_map})
//
-// sparse_slice([0, 0], [2, 4]) = shape = [2, 4]
-// [ a ]
-// [b c ]
+// Arguments:
+// reverse_index_map: 1-D. The reverse index map from SparseFillEmptyRows.
+// grad_values: 1-D. The gradients from backprop.
//
-// sparse_slice([0, 4], [2, 3]) = shape = [2, 3]
-// [ d e ]
-// [ ]
+// Returns:
+// d_values: 1-D. The backprop into values.
+// d_default_value: 0-D. The backprop into default_value.
+func SparseFillEmptyRowsGrad(scope *Scope, reverse_index_map tf.Output, grad_values tf.Output) (d_values tf.Output, d_default_value tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "SparseFillEmptyRowsGrad",
+ Input: []tf.Input{
+ reverse_index_map, grad_values,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0), op.Output(1)
+}
+
+// Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)`
//
-// Arguments:
-// indices: 2-D tensor represents the indices of the sparse tensor.
-// values: 1-D tensor represents the values of the sparse tensor.
-// shape: 1-D. tensor represents the shape of the sparse tensor.
-// start: 1-D. tensor represents the start of the slice.
-// size: 1-D. tensor represents the size of the slice.
-// output indices: A list of 1-D tensors represents the indices of the output
-// sparse tensors.
+// if `features < 0`, `scale * features` otherwise.
//
-// Returns A list of 1-D tensors represents the values of the output sparse
-// tensors.A list of 1-D tensors represents the shape of the output sparse
-// tensors.
-func SparseSlice(scope *Scope, indices tf.Output, values tf.Output, shape tf.Output, start tf.Output, size tf.Output) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
+// See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
+func Selu(scope *Scope, features tf.Output) (activations tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "SparseSlice",
+ Type: "Selu",
Input: []tf.Input{
- indices, values, shape, start, size,
+ features,
},
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
+ return op.Output(0)
}
-// ListDiffAttr is an optional argument to ListDiff.
-type ListDiffAttr func(optionalAttr)
+// SetSizeAttr is an optional argument to SetSize.
+type SetSizeAttr func(optionalAttr)
-// ListDiffOutIdx sets the optional out_idx attribute to value.
-// If not specified, defaults to DT_INT32
-func ListDiffOutIdx(value tf.DataType) ListDiffAttr {
+// SetSizeValidateIndices sets the optional validate_indices attribute to value.
+// If not specified, defaults to true
+func SetSizeValidateIndices(value bool) SetSizeAttr {
return func(m optionalAttr) {
- m["out_idx"] = value
+ m["validate_indices"] = value
}
}
-// Computes the difference between two lists of numbers or strings.
-//
-// Given a list `x` and a list `y`, this operation returns a list `out` that
-// represents all values that are in `x` but not in `y`. The returned list `out`
-// is sorted in the same order that the numbers appear in `x` (duplicates are
-// preserved). This operation also returns a list `idx` that represents the
-// position of each `out` element in `x`. In other words:
-//
-// `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]`
-//
-// For example, given this input:
-//
-// ```
-// x = [1, 2, 3, 4, 5, 6]
-// y = [1, 3, 5]
-// ```
+// Number of unique elements along last dimension of input `set`.
//
-// This operation would return:
+// Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`,
+// and `set_shape`. The last dimension contains values in a set, duplicates are
+// allowed but ignored.
//
-// ```
-// out ==> [2, 4, 6]
-// idx ==> [1, 3, 5]
-// ```
+// If `validate_indices` is `True`, this op validates the order and range of `set`
+// indices.
//
// Arguments:
-// x: 1-D. Values to keep.
-// y: 1-D. Values to remove.
+// set_indices: 2D `Tensor`, indices of a `SparseTensor`.
+// set_values: 1D `Tensor`, values of a `SparseTensor`.
+// set_shape: 1D `Tensor`, shape of a `SparseTensor`.
//
-// Returns 1-D. Values present in `x` but not in `y`.1-D. Positions of `x` values preserved in `out`.
-func ListDiff(scope *Scope, x tf.Output, y tf.Output, optional ...ListDiffAttr) (out tf.Output, idx tf.Output) {
+// Returns For `set` of rank `n`, this is a `Tensor` with rank `n-1` and the same
+// first `n-1` dimensions as `set`. Each value is the number of unique elements in
+// the corresponding `[0...n-1]` dimension of `set`.
+func SetSize(scope *Scope, set_indices tf.Output, set_values tf.Output, set_shape tf.Output, optional ...SetSizeAttr) (size tf.Output) {
if scope.Err() != nil {
return
}
@@ -13021,276 +13185,191 @@ func ListDiff(scope *Scope, x tf.Output, y tf.Output, optional ...ListDiffAttr)
a(attrs)
}
opspec := tf.OpSpec{
- Type: "ListDiff",
+ Type: "SetSize",
Input: []tf.Input{
- x, y,
+ set_indices, set_values, set_shape,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1)
+ return op.Output(0)
}
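+
+// A usage sketch (editor's illustration): a 2x2 sparse set where row 0 holds
+// the duplicated value 5 and row 1 holds 7, so the sizes come out as [1, 1]:
+//
+//	s := op.NewScope()
+//	indices := op.Const(s.SubScope("i"), [][]int64{{0, 0}, {0, 1}, {1, 0}})
+//	values := op.Const(s.SubScope("v"), []int64{5, 5, 7})
+//	shape := op.Const(s.SubScope("sh"), []int64{2, 2})
+//	n := op.SetSize(s, indices, values, shape, op.SetSizeValidateIndices(true))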
-// Generates sparse cross from a list of sparse and dense tensors.
-//
-// The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each
-// representing features of one feature column. It outputs a 2D `SparseTensor` with
-// the batchwise crosses of these features.
-//
-// For example, if the inputs are
-//
-// inputs[0]: SparseTensor with shape = [2, 2]
-// [0, 0]: "a"
-// [1, 0]: "b"
-// [1, 1]: "c"
-//
-// inputs[1]: SparseTensor with shape = [2, 1]
-// [0, 0]: "d"
-// [1, 0]: "e"
-//
-// inputs[2]: Tensor [["f"], ["g"]]
-//
-// then the output will be
-//
-// shape = [2, 2]
-// [0, 0]: "a_X_d_X_f"
-// [1, 0]: "b_X_e_X_g"
-// [1, 1]: "c_X_e_X_g"
+// Computes the sign and the log of the absolute value of the determinant of
+// one or more square matrices.
//
-// if hashed_output=true then the output will be
//
-// shape = [2, 2]
-// [0, 0]: FingerprintCat64(
-// Fingerprint64("f"), FingerprintCat64(
-// Fingerprint64("d"), Fingerprint64("a")))
-// [1, 0]: FingerprintCat64(
-// Fingerprint64("g"), FingerprintCat64(
-// Fingerprint64("e"), Fingerprint64("b")))
-// [1, 1]: FingerprintCat64(
-// Fingerprint64("g"), FingerprintCat64(
-// Fingerprint64("e"), Fingerprint64("c")))
+// The input is a tensor of shape `[N, M, M]` whose inner-most 2 dimensions
+// form square matrices. The outputs are two tensors containing the signs and
+// absolute values of the log determinants for all N input submatrices
+// `[..., :, :]` such that the determinant = sign*exp(log_abs_determinant).
+// The log_abs_determinant is computed as det(P)*sum(log(diag(LU))) where LU
+// is the LU decomposition of the input and P is the corresponding
+// permutation matrix.
//
// Arguments:
-// indices: 2-D. Indices of each input `SparseTensor`.
-// values: 1-D. values of each `SparseTensor`.
-// shapes: 1-D. Shapes of each `SparseTensor`.
-// dense_inputs: 2-D. Columns represented by dense `Tensor`.
-// hashed_output: If true, returns the hash of the cross instead of the string.
-// This will allow us avoiding string manipulations.
-// num_buckets: It is used if hashed_output is true.
-// output = hashed_value%num_buckets if num_buckets > 0 else hashed_value.
-// hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
-// function to combine the crosses fingerprints.
-//
-//
+// input: Shape is `[N, M, M]`.
//
-// Returns 2-D. Indices of the concatenated `SparseTensor`. 1-D. Non-empty
-// values of the concatenated or hashed `SparseTensor`. 1-D. Shape of the
-// concatenated `SparseTensor`.
-func SparseCross(scope *Scope, indices []tf.Output, values []tf.Output, shapes []tf.Output, dense_inputs []tf.Output, hashed_output bool, num_buckets int64, hash_key int64, out_type tf.DataType, internal_type tf.DataType) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
+// Returns The signs of the log determinants of the inputs. Shape is `[N]`.
+// The logs of the absolute values of the determinants of the N input
+// matrices. Shape is `[N]`.
+func LogMatrixDeterminant(scope *Scope, input tf.Output) (sign tf.Output, log_abs_determinant tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"hashed_output": hashed_output, "num_buckets": num_buckets, "hash_key": hash_key, "out_type": out_type, "internal_type": internal_type}
opspec := tf.OpSpec{
- Type: "SparseCross",
+ Type: "LogMatrixDeterminant",
Input: []tf.Input{
- tf.OutputList(indices), tf.OutputList(values), tf.OutputList(shapes), tf.OutputList(dense_inputs),
+ input,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
+ return op.Output(0), op.Output(1)
}
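+
+// A usage sketch (editor's illustration) for a single 2x2 matrix with
+// determinant 6, so `sign` evaluates to [1] and `log_abs_determinant`
+// to [log(6)]:
+//
+//	s := op.NewScope()
+//	m := op.Const(s, [][][]float32{{{2, 0}, {0, 3}}})
+//	sign, logAbsDet := op.LogMatrixDeterminant(s, m)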
-// FractionalMaxPoolAttr is an optional argument to FractionalMaxPool.
-type FractionalMaxPoolAttr func(optionalAttr)
+// SumAttr is an optional argument to Sum.
+type SumAttr func(optionalAttr)
-// FractionalMaxPoolPseudoRandom sets the optional pseudo_random attribute to value.
+// SumKeepDims sets the optional keep_dims attribute to value.
//
-// value: When set to True, generates the pooling sequence in a
-// pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin
-// Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for
-// difference between pseudorandom and random.
+// value: If true, retain reduced dimensions with length 1.
// If not specified, defaults to false
-func FractionalMaxPoolPseudoRandom(value bool) FractionalMaxPoolAttr {
+func SumKeepDims(value bool) SumAttr {
return func(m optionalAttr) {
- m["pseudo_random"] = value
+ m["keep_dims"] = value
}
}
-// FractionalMaxPoolOverlapping sets the optional overlapping attribute to value.
-//
-// value: When set to True, it means when pooling, the values at the boundary
-// of adjacent pooling cells are used by both cells. For example:
+// Computes the sum of elements across dimensions of a tensor.
//
-// `index 0 1 2 3 4`
+// Reduces `input` along the dimensions given in `axis`. Unless
+// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+// `axis`. If `keep_dims` is true, the reduced dimensions are
+// retained with length 1.
//
-// `value 20 5 16 3 7`
+// Arguments:
+// input: The tensor to reduce.
+// axis: The dimensions to reduce. Must be in the range
+// `[-rank(input), rank(input))`.
//
-// If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.
-// The result would be [20, 16] for fractional max pooling.
-// If not specified, defaults to false
-func FractionalMaxPoolOverlapping(value bool) FractionalMaxPoolAttr {
- return func(m optionalAttr) {
- m["overlapping"] = value
+// Returns The reduced tensor.
+func Sum(scope *Scope, input tf.Output, axis tf.Output, optional ...SumAttr) (output tf.Output) {
+ if scope.Err() != nil {
+ return
}
-}
-
-// FractionalMaxPoolDeterministic sets the optional deterministic attribute to value.
-//
-// value: When set to True, a fixed pooling region will be used when
-// iterating over a FractionalMaxPool node in the computation graph. Mainly used
-// in unit test to make FractionalMaxPool deterministic.
-// If not specified, defaults to false
-func FractionalMaxPoolDeterministic(value bool) FractionalMaxPoolAttr {
- return func(m optionalAttr) {
- m["deterministic"] = value
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
}
-}
-
-// FractionalMaxPoolSeed sets the optional seed attribute to value.
-//
-// value: If either seed or seed2 are set to be non-zero, the random number
-// generator is seeded by the given seed. Otherwise, it is seeded by a
-// random seed.
-// If not specified, defaults to 0
-func FractionalMaxPoolSeed(value int64) FractionalMaxPoolAttr {
- return func(m optionalAttr) {
- m["seed"] = value
+ opspec := tf.OpSpec{
+ Type: "Sum",
+ Input: []tf.Input{
+ input, axis,
+ },
+ Attrs: attrs,
}
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
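+
+// A usage sketch (editor's illustration): reducing a 2x2 matrix along axis 1
+// while keeping the reduced dimension, yielding [[3], [7]]:
+//
+//	s := op.NewScope()
+//	x := op.Const(s.SubScope("x"), [][]float32{{1, 2}, {3, 4}})
+//	axis := op.Const(s.SubScope("axis"), []int32{1})
+//	total := op.Sum(s, x, axis, op.SumKeepDims(true))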
-// FractionalMaxPoolSeed2 sets the optional seed2 attribute to value.
+// Delete the tensor specified by its handle in the session.
//
-// value: A second seed to avoid seed collision.
-// If not specified, defaults to 0
-func FractionalMaxPoolSeed2(value int64) FractionalMaxPoolAttr {
- return func(m optionalAttr) {
- m["seed2"] = value
+// Arguments:
+// handle: The handle for a tensor stored in the session state.
+//
+// Returns the created operation.
+func DeleteSessionTensor(scope *Scope, handle tf.Output) (o *tf.Operation) {
+ if scope.Err() != nil {
+ return
}
+ opspec := tf.OpSpec{
+ Type: "DeleteSessionTensor",
+ Input: []tf.Input{
+ handle,
+ },
+ }
+ return scope.AddOperation(opspec)
}
-// Performs fractional max pooling on the input.
-//
-// Fractional max pooling is slightly different than regular max pooling. In
-// regular max pooling, you downsize an input set by taking the maximum value of
-// smaller N x N subsections of the set (often 2x2), and try to reduce the set by
-// a factor of N, where N is an integer. Fractional max pooling, as you might
-// expect from the word "fractional", means that the overall reduction ratio N
-// does not have to be an integer.
-//
-// The sizes of the pooling regions are generated randomly but are fairly uniform.
-// For example, let's look at the height dimension, and the constraints on the
-// list of rows that will be pool boundaries.
-//
-// First we define the following:
-//
-// 1. input_row_length : the number of rows from the input set
-// 2. output_row_length : which will be smaller than the input
-// 3. alpha = input_row_length / output_row_length : our reduction ratio
-// 4. K = floor(alpha)
-// 5. row_pooling_sequence : this is the result list of pool boundary rows
-//
-// Then, row_pooling_sequence should satisfy:
+// L2 Loss.
//
-// 1. a[0] = 0 : the first value of the sequence is 0
-// 2. a[end] = input_row_length : the last value of the sequence is the size
-// 3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
-// 4. length(row_pooling_sequence) = output_row_length+1
+// Computes half the L2 norm of a tensor without the `sqrt`:
//
-// For more details on fractional max pooling, see this paper:
-// [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071)
+// output = sum(t ** 2) / 2
//
// Arguments:
-// value: 4-D with shape `[batch, height, width, channels]`.
-// pooling_ratio: Pooling ratio for each dimension of `value`, currently only
-// supports row and col dimension and should be >= 1.0. For example, a valid
-// pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements
-// must be 1.0 because we don't allow pooling on batch and channels
-// dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions
-// respectively.
+// t: Typically 2-D, but may have any dimensions.
//
-// Returns output tensor after fractional max pooling. Row pooling sequence,
-// needed to calculate gradient. Column pooling sequence, needed to calculate
-// gradient.
-func FractionalMaxPool(scope *Scope, value tf.Output, pooling_ratio []float32, optional ...FractionalMaxPoolAttr) (output tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output) {
+// Returns 0-D.
+func L2Loss(scope *Scope, t tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"pooling_ratio": pooling_ratio}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "FractionalMaxPool",
+ Type: "L2Loss",
Input: []tf.Input{
- value,
+ t,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
+ return op.Output(0)
}
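+
+// A usage sketch (editor's illustration): for t = [3, 4] the op computes
+// (3*3 + 4*4) / 2 = 12.5:
+//
+//	s := op.NewScope()
+//	t := op.Const(s, []float32{3, 4})
+//	loss := op.L2Loss(s, t)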
-// Concatenates a list of `SparseTensor` along the specified dimension.
-//
-// Concatenation is with respect to the dense versions of these sparse tensors.
-// It is assumed that each input is a `SparseTensor` whose elements are ordered
-// along increasing dimension number.
-//
-// All inputs' shapes must match, except for the concat dimension. The
-// `indices`, `values`, and `shapes` lists must have the same length.
-//
-// The output shape is identical to the inputs', except along the concat
-// dimension, where it is the sum of the inputs' sizes along that dimension.
-//
-// The output elements will be resorted to preserve the sort order along
-// increasing dimension number.
-//
-// This op runs in `O(M log M)` time, where `M` is the total number of non-empty
-// values across all inputs. This is due to the need for an internal sort in
-// order to concatenate efficiently across an arbitrary dimension.
-//
-// For example, if `concat_dim = 1` and the inputs are
-//
-// sp_inputs[0]: shape = [2, 3]
-// [0, 2]: "a"
-// [1, 0]: "b"
-// [1, 1]: "c"
-//
-// sp_inputs[1]: shape = [2, 4]
-// [0, 1]: "d"
-// [0, 2]: "e"
+// DenseToSparseSetOperationAttr is an optional argument to DenseToSparseSetOperation.
+type DenseToSparseSetOperationAttr func(optionalAttr)
+
+// DenseToSparseSetOperationValidateIndices sets the optional validate_indices attribute to value.
+// If not specified, defaults to true
+func DenseToSparseSetOperationValidateIndices(value bool) DenseToSparseSetOperationAttr {
+ return func(m optionalAttr) {
+ m["validate_indices"] = value
+ }
+}
+
+// Applies set operation along last dimension of `Tensor` and `SparseTensor`.
//
-// then the output will be
+// See SetOperationOp::SetOperationFromContext for values of `set_operation`.
//
-// shape = [2, 7]
-// [0, 2]: "a"
-// [0, 4]: "d"
-// [0, 5]: "e"
-// [1, 0]: "b"
-// [1, 1]: "c"
+// Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`,
+// and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same
+// as `set1`. Dimension `n` contains values in a set, duplicates are allowed but
+// ignored.
//
-// Graphically this is equivalent to doing
+// If `validate_indices` is `True`, this op validates the order and range of `set2`
+// indices.
//
-// [ a] concat [ d e ] = [ a d e ]
-// [b c ] [ ] [b c ]
+// Output `result` is a `SparseTensor` represented by `result_indices`,
+// `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
+// has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
+// dimension contains the result of `set_operation` applied to the corresponding
+// `[0...n-1]` dimension of `set`.
//
// Arguments:
-// indices: 2-D. Indices of each input `SparseTensor`.
-// values: 1-D. Non-empty values of each `SparseTensor`.
-// shapes: 1-D. Shapes of each `SparseTensor`.
-// concat_dim: Dimension to concatenate along. Must be in range [-rank, rank),
-// where rank is the number of dimensions in each input `SparseTensor`.
+// set1: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.
+// Dimension `n` contains values in a set, duplicates are allowed but ignored.
+// set2_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
+// order.
+// set2_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major
+// order.
+// set2_shape: 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must
+// be the same as the 1st `n-1` dimensions of `set1`, `result_shape[n]` is the
+// max set size across `n-1` dimensions.
//
-// Returns 2-D. Indices of the concatenated `SparseTensor`. 1-D. Non-empty
-// values of the concatenated `SparseTensor`. 1-D. Shape of the concatenated
-// `SparseTensor`.
-func SparseConcat(scope *Scope, indices []tf.Output, values []tf.Output, shapes []tf.Output, concat_dim int64) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
+//
+// Returns 2D indices of a `SparseTensor`. 1D values of a `SparseTensor`.
+// 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is
+// the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`
+// is the max result set size across all `0...n-1` dimensions.
+func DenseToSparseSetOperation(scope *Scope, set1 tf.Output, set2_indices tf.Output, set2_values tf.Output, set2_shape tf.Output, set_operation string, optional ...DenseToSparseSetOperationAttr) (result_indices tf.Output, result_values tf.Output, result_shape tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"concat_dim": concat_dim}
+ attrs := map[string]interface{}{"set_operation": set_operation}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "SparseConcat",
+ Type: "DenseToSparseSetOperation",
Input: []tf.Input{
- tf.OutputList(indices), tf.OutputList(values), tf.OutputList(shapes),
+ set1, set2_indices, set2_values, set2_shape,
},
Attrs: attrs,
}
@@ -13298,22 +13377,38 @@ func SparseConcat(scope *Scope, indices []tf.Output, values []tf.Output, shapes
return op.Output(0), op.Output(1), op.Output(2)
}
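+
+// A usage sketch (editor's illustration; it assumes "intersection" is among
+// the accepted `set_operation` values, as in `tf.sets`). Row 0 intersects
+// {1,2} with {2}, giving {2}; row 1 intersects {3,4} with {5}, giving the
+// empty set:
+//
+//	s := op.NewScope()
+//	set1 := op.Const(s.SubScope("set1"), [][]int64{{1, 2}, {3, 4}})
+//	i2 := op.Const(s.SubScope("i2"), [][]int64{{0, 0}, {1, 0}})
+//	v2 := op.Const(s.SubScope("v2"), []int64{2, 5})
+//	sh2 := op.Const(s.SubScope("sh2"), []int64{2, 1})
+//	ri, rv, rs := op.DenseToSparseSetOperation(s, set1, i2, v2, sh2, "intersection")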
-// Performs a padding as a preprocess during a convolution.
+// FusedResizeAndPadConv2DAttr is an optional argument to FusedResizeAndPadConv2D.
+type FusedResizeAndPadConv2DAttr func(optionalAttr)
+
+// FusedResizeAndPadConv2DResizeAlignCorners sets the optional resize_align_corners attribute to value.
//
-// Similar to FusedResizeAndPadConv2d, this op allows for an optimized
-// implementation where the spatial padding transformation stage is fused with the
-// im2col lookup, but in this case without the bilinear filtering required for
-// resizing. Fusing the padding prevents the need to write out the intermediate
-// results as whole tensors, reducing memory pressure, and we can get some latency
-// gains by merging the transformation calculations.
-// The data_format attribute for Conv2D isn't supported by this op, and 'NHWC'
-// order is used instead.
+// value: If true, rescale input by (new_height - 1) / (height - 1),
+// which exactly aligns the 4 corners of images and resized images. If false, rescale
+// by new_height / height. Treat similarly the width dimension.
+// If not specified, defaults to false
+func FusedResizeAndPadConv2DResizeAlignCorners(value bool) FusedResizeAndPadConv2DAttr {
+ return func(m optionalAttr) {
+ m["resize_align_corners"] = value
+ }
+}
+
+// Performs a resize and padding as a preprocess during a convolution.
+//
+// It's often possible to do spatial transformations more efficiently as part of
+// the packing stage of a convolution, so this op allows for an optimized
+// implementation where these stages are fused together. This prevents the need to
+// write out the intermediate results as whole tensors, reducing memory pressure,
+// and we can get some latency gains by merging the transformation calculations.
+// The data_format attribute for Conv2D isn't supported by this op, and defaults to
+// 'NHWC' order.
// Internally this op uses a single per-graph scratch buffer, which means that it
// will block if multiple versions are being run in parallel. This is because this
// operator is primarily an optimization to minimize memory usage.
//
// Arguments:
// input: 4-D with shape `[batch, in_height, in_width, in_channels]`.
+// size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The
+// new size for the images.
// paddings: A two-column matrix specifying the padding sizes. The number of
// rows must be the same as the rank of `input`.
// filter: 4-D with shape
@@ -13322,15 +13417,18 @@ func SparseConcat(scope *Scope, indices []tf.Output, values []tf.Output, shapes
// strides: 1-D of length 4. The stride of the sliding window for each dimension
// of `input`. Must be in the same order as the dimension specified with format.
// padding: The type of padding algorithm to use.
-func FusedPadConv2D(scope *Scope, input tf.Output, paddings tf.Output, filter tf.Output, mode string, strides []int64, padding string) (output tf.Output) {
+func FusedResizeAndPadConv2D(scope *Scope, input tf.Output, size tf.Output, paddings tf.Output, filter tf.Output, mode string, strides []int64, padding string, optional ...FusedResizeAndPadConv2DAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
attrs := map[string]interface{}{"mode": mode, "strides": strides, "padding": padding}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "FusedPadConv2D",
+ Type: "FusedResizeAndPadConv2D",
Input: []tf.Input{
- input, paddings, filter,
+ input, size, paddings, filter,
},
Attrs: attrs,
}
@@ -13338,136 +13436,121 @@ func FusedPadConv2D(scope *Scope, input tf.Output, paddings tf.Output, filter tf
return op.Output(0)
}
-// Returns immutable tensor from memory region.
+// Subtracts a value from the current value of a variable.
//
-// The current implementation memmaps the tensor from a file.
+// Any ReadVariableOp which depends directly or indirectly on this assign is
+// guaranteed to see the decremented value or a subsequent newer one.
+//
+// Outputs the decremented value, which can be used to totally order the
+// decrements to this variable.
//
// Arguments:
-// dtype: Type of the returned tensor.
-// shape: Shape of the returned tensor.
-// memory_region_name: Name of readonly memory region used by the tensor, see
-// NewReadOnlyMemoryRegionFromFile in tensorflow::Env.
-func ImmutableConst(scope *Scope, dtype tf.DataType, shape tf.Shape, memory_region_name string) (tensor tf.Output) {
+// resource: handle to the resource in which to store the variable.
+// value: the value by which the variable will be decremented.
+//
+// Returns the created operation.
+func AssignSubVariableOp(scope *Scope, resource tf.Output, value tf.Output) (o *tf.Operation) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dtype": dtype, "shape": shape, "memory_region_name": memory_region_name}
opspec := tf.OpSpec{
- Type: "ImmutableConst",
-
- Attrs: attrs,
+ Type: "AssignSubVariableOp",
+ Input: []tf.Input{
+ resource, value,
+ },
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
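+
+// A usage sketch (editor's illustration): initialize a resource variable to
+// 10, then subtract 3; running `init` before `sub` leaves the variable at 7:
+//
+//	s := op.NewScope()
+//	v := op.VarHandleOp(s.SubScope("v"), tf.Float, tf.ScalarShape())
+//	init := op.AssignVariableOp(s.SubScope("init"), v, op.Const(s.SubScope("c10"), float32(10)))
+//	sub := op.AssignSubVariableOp(s.SubScope("sub"), v, op.Const(s.SubScope("c3"), float32(3)))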
-// Deserialize and concatenate `SparseTensors` from a serialized minibatch.
-//
-// The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where
-// `N` is the minibatch size and the rows correspond to packed outputs of
-// `SerializeSparse`. The ranks of the original `SparseTensor` objects
-// must all match. When the final `SparseTensor` is created, it has rank one
-// higher than the ranks of the incoming `SparseTensor` objects
-// (they have been concatenated along a new row dimension).
-//
-// The output `SparseTensor` object's shape values for all dimensions but the
-// first are the max across the input `SparseTensor` objects' shape values
-// for the corresponding dimensions. Its first shape value is `N`, the minibatch
-// size.
-//
-// The input `SparseTensor` objects' indices are assumed ordered in
-// standard lexicographic order. If this is not the case, after this
-// step run `SparseReorder` to restore index ordering.
-//
-// For example, if the serialized input is a `[2 x 3]` matrix representing two
-// original `SparseTensor` objects:
-//
-// index = [ 0]
-// [10]
-// [20]
-// values = [1, 2, 3]
-// shape = [50]
+// RestoreAttr is an optional argument to Restore.
+type RestoreAttr func(optionalAttr)
+
+// RestorePreferredShard sets the optional preferred_shard attribute to value.
//
-// and
+// value: Index of file to open first if multiple files match
+// `file_pattern`.
+// If not specified, defaults to -1
+func RestorePreferredShard(value int64) RestoreAttr {
+ return func(m optionalAttr) {
+ m["preferred_shard"] = value
+ }
+}
+
+// Restores a tensor from checkpoint files.
//
-// index = [ 2]
-// [10]
-// values = [4, 5]
-// shape = [30]
+// Reads a tensor stored in one or several files. If there are several files (for
+// instance because a tensor was saved as slices), `file_pattern` may contain
+// wildcard symbols (`*` and `?`) in the filename portion only, not in the
+// directory portion.
//
-// then the final deserialized `SparseTensor` will be:
+// If a `file_pattern` matches several files, `preferred_shard` can be used to hint
+// in which file the requested tensor is likely to be found. This op will first
+// open the file at index `preferred_shard` in the list of matching files and try
+// to restore tensors from that file. Only if some tensors or tensor slices are
+// not found in that first file, then the Op opens all the files. Setting
+// `preferred_shard` to match the value passed as the `shard` input
+// of a matching `Save` Op may speed up Restore. This attribute only affects
+// performance, not correctness. The default value -1 means files are processed in
+// order.
//
-// index = [0 0]
-// [0 10]
-// [0 20]
-// [1 2]
-// [1 10]
-// values = [1, 2, 3, 4, 5]
-// shape = [2 50]
+// See also `RestoreSlice`.
//
// Arguments:
-// serialized_sparse: 2-D, The `N` serialized `SparseTensor` objects.
-// Must have 3 columns.
-// dtype: The `dtype` of the serialized `SparseTensor` objects.
-func DeserializeManySparse(scope *Scope, serialized_sparse tf.Output, dtype tf.DataType) (sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output) {
+// file_pattern: Must have a single element. The pattern of the files from
+// which we read the tensor.
+// tensor_name: Must have a single element. The name of the tensor to be
+// restored.
+// dt: The type of the tensor to be restored.
+//
+// Returns The restored tensor.
+func Restore(scope *Scope, file_pattern tf.Output, tensor_name tf.Output, dt tf.DataType, optional ...RestoreAttr) (tensor tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dtype": dtype}
+ attrs := map[string]interface{}{"dt": dt}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "DeserializeManySparse",
+ Type: "Restore",
Input: []tf.Input{
- serialized_sparse,
+ file_pattern, tensor_name,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
+ return op.Output(0)
}
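+
+// A usage sketch (editor's illustration; the checkpoint pattern and tensor
+// name below are hypothetical placeholders):
+//
+//	s := op.NewScope()
+//	pattern := op.Const(s.SubScope("p"), "/tmp/model.ckpt-*")
+//	name := op.Const(s.SubScope("n"), "weights")
+//	w := op.Restore(s, pattern, name, tf.Float, op.RestorePreferredShard(0))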
-// SparseTensorDenseMatMulAttr is an optional argument to SparseTensorDenseMatMul.
-type SparseTensorDenseMatMulAttr func(optionalAttr)
+// QuantizedResizeBilinearAttr is an optional argument to QuantizedResizeBilinear.
+type QuantizedResizeBilinearAttr func(optionalAttr)
-// SparseTensorDenseMatMulAdjointA sets the optional adjoint_a attribute to value.
+// QuantizedResizeBilinearAlignCorners sets the optional align_corners attribute to value.
//
-// value: Use the adjoint of A in the matrix multiply. If A is complex, this
-// is transpose(conj(A)). Otherwise it's transpose(A).
+// value: If true, rescale input by (new_height - 1) / (height - 1), which
+// exactly aligns the 4 corners of images and resized images. If false, rescale
+// by new_height / height. Treat similarly the width dimension.
// If not specified, defaults to false
-func SparseTensorDenseMatMulAdjointA(value bool) SparseTensorDenseMatMulAttr {
+func QuantizedResizeBilinearAlignCorners(value bool) QuantizedResizeBilinearAttr {
return func(m optionalAttr) {
- m["adjoint_a"] = value
+ m["align_corners"] = value
}
}
-// SparseTensorDenseMatMulAdjointB sets the optional adjoint_b attribute to value.
+// Resize quantized `images` to `size` using quantized bilinear interpolation.
//
-// value: Use the adjoint of B in the matrix multiply. If B is complex, this
-// is transpose(conj(B)). Otherwise it's transpose(B).
-// If not specified, defaults to false
-func SparseTensorDenseMatMulAdjointB(value bool) SparseTensorDenseMatMulAttr {
- return func(m optionalAttr) {
- m["adjoint_b"] = value
- }
-}
-
-// Multiply SparseTensor (of rank 2) "A" by dense matrix "B".
+// Input images and output images must be quantized types.
//
-// No validity checking is performed on the indices of A. However, the following
-// input format is recommended for optimal behavior:
+// Arguments:
+// images: 4-D with shape `[batch, height, width, channels]`.
+// size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The
+// new size for the images.
//
-// if adjoint_a == false:
-// A should be sorted in lexicographically increasing order. Use SparseReorder
-// if you're not sure.
-// if adjoint_a == true:
-// A should be sorted in order of increasing dimension 1 (i.e., "column major"
-// order instead of "row major" order).
//
-// Arguments:
-// a_indices: 2-D. The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix.
-// a_values: 1-D. The `values` of the `SparseTensor`, size `[nnz]` Vector.
-// a_shape: 1-D. The `shape` of the `SparseTensor`, size `[2]` Vector.
-// b: 2-D. A dense Matrix.
-func SparseTensorDenseMatMul(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b tf.Output, optional ...SparseTensorDenseMatMulAttr) (product tf.Output) {
+//
+// Returns 4-D with shape
+// `[batch, new_height, new_width, channels]`.
+func QuantizedResizeBilinear(scope *Scope, images tf.Output, size tf.Output, min tf.Output, max tf.Output, optional ...QuantizedResizeBilinearAttr) (resized_images tf.Output, out_min tf.Output, out_max tf.Output) {
if scope.Err() != nil {
return
}
@@ -13476,269 +13559,361 @@ func SparseTensorDenseMatMul(scope *Scope, a_indices tf.Output, a_values tf.Outp
a(attrs)
}
opspec := tf.OpSpec{
- Type: "SparseTensorDenseMatMul",
+ Type: "QuantizedResizeBilinear",
Input: []tf.Input{
- a_indices, a_values, a_shape, b,
+ images, size, min, max,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
+ return op.Output(0), op.Output(1), op.Output(2)
+}
+
+// Computes the minimum along segments of a tensor.
+//
+// Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
+// segments.
+//
+// Computes a tensor such that
+// \\(output_i = \min_j(data_j)\\) where `min` is over `j` such
+// that `segment_ids[j] == i`.
+//
+// If the min is empty for a given segment ID `i`, `output[i] = 0`.
+//
+// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentMin.png" alt>
+// </div>
+//
+// Arguments:
+//
+// segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s
+// first dimension. Values should be sorted and can be repeated.
+//
+// Returns Has same shape as data, except for dimension 0 which
+// has size `k`, the number of segments.
+func SegmentMin(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "SegmentMin",
+ Input: []tf.Input{
+ data, segment_ids,
+ },
+ }
+ op := scope.AddOperation(opspec)
return op.Output(0)
}
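+
+// A usage sketch (editor's illustration): two segments, yielding [1, 2]:
+//
+//	s := op.NewScope()
+//	data := op.Const(s.SubScope("d"), []int32{4, 1, 7, 2})
+//	ids := op.Const(s.SubScope("ids"), []int32{0, 0, 1, 1})
+//	mins := op.SegmentMin(s, data, ids)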
-// WriteImageSummaryAttr is an optional argument to WriteImageSummary.
-type WriteImageSummaryAttr func(optionalAttr)
+// SdcaOptimizerAttr is an optional argument to SdcaOptimizer.
+type SdcaOptimizerAttr func(optionalAttr)
-// WriteImageSummaryMaxImages sets the optional max_images attribute to value.
-//
-// value: Max number of batch elements to generate images for.
-// If not specified, defaults to 3
+// SdcaOptimizerAdaptative sets the optional adaptative attribute to value.
//
-// REQUIRES: value >= 1
-func WriteImageSummaryMaxImages(value int64) WriteImageSummaryAttr {
+// value: Whether to use Adaptive SDCA for the inner loop.
+// If not specified, defaults to false
+func SdcaOptimizerAdaptative(value bool) SdcaOptimizerAttr {
return func(m optionalAttr) {
- m["max_images"] = value
+ m["adaptative"] = value
}
}
-// Writes a `Summary` protocol buffer with images.
-//
-// The summary has up to `max_images` summary values containing images. The
-// images are built from `tensor` which must be 4-D with shape `[batch_size,
-// height, width, channels]` and where `channels` can be:
-//
-// * 1: `tensor` is interpreted as Grayscale.
-// * 3: `tensor` is interpreted as RGB.
-// * 4: `tensor` is interpreted as RGBA.
-//
-// The images have the same number of channels as the input tensor. For float
-// input, the values are normalized one image at a time to fit in the range
-// `[0, 255]`. `uint8` values are unchanged. The op uses two different
-// normalization algorithms:
+// Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for
+// linear models with L1 + L2 regularization.
//
-// * If the input values are all positive, they are rescaled so the largest one
-// is 255.
+//
+// As the global optimization objective is strongly convex, the optimizer
+// optimizes the dual objective at each step. The optimizer applies each update
+// one example at a time. Examples are sampled uniformly, and the optimizer is
+// learning-rate free and enjoys a linear convergence rate.
//
-// * If any input value is negative, the values are shifted so input value 0.0
-// is at 127. They are then rescaled so that either the smallest value is 0,
-// or the largest one is 255.
+// [Proximal Stochastic Dual Coordinate Ascent](http://arxiv.org/pdf/1211.2717v1.pdf).<br>
+// Shai Shalev-Shwartz, Tong Zhang. 2012
//
-// The `tag` argument is a scalar `Tensor` of type `string`. It is used to
-// build the `tag` of the summary values:
+// $$\text{Loss Objective} = \sum_i f_i(w x_i) + \frac{l_2}{2} \lVert w \rVert^2 + l_1 \lVert w \rVert_1$$
//
-// * If `max_images` is 1, the summary value tag is '*tag*/image'.
-// * If `max_images` is greater than 1, the summary value tags are
-// generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.
+// [Adding vs. Averaging in Distributed Primal-Dual Optimization](http://arxiv.org/abs/1502.03508).<br>
+// Chenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan,
+// Peter Richtarik, Martin Takac. 2015
//
-// The `bad_color` argument is the color to use in the generated images for
-// non-finite input values. It is a `unit8` 1-D tensor of length `channels`.
-// Each element must be in the range `[0, 255]` (It represents the value of a
-// pixel in the output image). Non-finite values in the input tensor are
-// replaced by this tensor in the output image. The default value is the color
-// red.
+// [Stochastic Dual Coordinate Ascent with Adaptive Probabilities](https://arxiv.org/abs/1502.08053).<br>
+// Dominik Csiba, Zheng Qu, Peter Richtarik. 2015
//
// Arguments:
-// writer: A handle to a summary writer.
-// step: The step to write the summary for.
-// tag: Scalar. Used to build the `tag` attribute of the summary values.
-// tensor: 4-D of shape `[batch_size, height, width, channels]` where
-// `channels` is 1, 3, or 4.
-// bad_color: Color to use for pixels with non-finite values.
+// sparse_example_indices: a list of vectors which contain example indices.
+// sparse_feature_indices: a list of vectors which contain feature indices.
+// sparse_feature_values: a list of vectors which contain the feature values
+// associated with each feature group.
+// dense_features: a list of matrices which contains the dense feature values.
+// example_weights: a vector which contains the weight associated with each
+// example.
+// example_labels: a vector which contains the label/target associated with each
+// example.
+// sparse_indices: a list of vectors where each value is the indices which have
+// corresponding weights in sparse_weights. This field may be omitted for the
+// dense approach.
+// sparse_weights: a list of vectors where each value is the weight associated with
+// a sparse feature group.
+// dense_weights: a list of vectors where the values are the weights associated
+// with a dense feature group.
+// example_state_data: a list of vectors containing the example state data.
+// loss_type: Type of the primal loss. Currently SdcaSolver supports logistic,
+// squared and hinge losses.
+// l1: Symmetric l1 regularization strength.
+// l2: Symmetric l2 regularization strength.
+// num_loss_partitions: Number of partitions of the global loss function.
+// num_inner_iterations: Number of iterations per mini-batch.
//
-// Returns the created operation.
-func WriteImageSummary(scope *Scope, writer tf.Output, step tf.Output, tag tf.Output, tensor tf.Output, bad_color tf.Output, optional ...WriteImageSummaryAttr) (o *tf.Operation) {
+// Returns a list of vectors containing the updated example state data.
+// A list of vectors where each value is the delta weights associated with a
+// sparse feature group. A list of vectors where the values are the delta
+// weights associated with a dense feature group.
+func SdcaOptimizer(scope *Scope, sparse_example_indices []tf.Output, sparse_feature_indices []tf.Output, sparse_feature_values []tf.Output, dense_features []tf.Output, example_weights tf.Output, example_labels tf.Output, sparse_indices []tf.Output, sparse_weights []tf.Output, dense_weights []tf.Output, example_state_data tf.Output, loss_type string, l1 float32, l2 float32, num_loss_partitions int64, num_inner_iterations int64, optional ...SdcaOptimizerAttr) (out_example_state_data tf.Output, out_delta_sparse_weights []tf.Output, out_delta_dense_weights []tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"loss_type": loss_type, "l1": l1, "l2": l2, "num_loss_partitions": num_loss_partitions, "num_inner_iterations": num_inner_iterations}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "WriteImageSummary",
+ Type: "SdcaOptimizer",
Input: []tf.Input{
- writer, step, tag, tensor, bad_color,
+ tf.OutputList(sparse_example_indices), tf.OutputList(sparse_feature_indices), tf.OutputList(sparse_feature_values), tf.OutputList(dense_features), example_weights, example_labels, tf.OutputList(sparse_indices), tf.OutputList(sparse_weights), tf.OutputList(dense_weights), example_state_data,
},
Attrs: attrs,
}
- return scope.AddOperation(opspec)
-}
-
-// Pads a tensor with zeros.
-//
-// This operation pads a `input` with zeros according to the `paddings` you
-// specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is the
-// rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
-// how many zeros to add before the contents of `input` in that dimension, and
-// `paddings[D, 1]` indicates how many zeros to add after the contents of `input`
-// in that dimension.
-//
-// The padded size of each dimension D of the output is:
-//
-// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
-//
-// For example:
-//
-// ```
-// # 't' is [[1, 1], [2, 2]]
-// # 'paddings' is [[1, 1], [2, 2]]
-// # rank of 't' is 2
-// pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
-// [0, 0, 1, 1, 0, 0]
-// [0, 0, 2, 2, 0, 0]
-// [0, 0, 0, 0, 0, 0]]
-// ```
-func Pad(scope *Scope, input tf.Output, paddings tf.Output) (output tf.Output) {
+ op := scope.AddOperation(opspec)
if scope.Err() != nil {
return
}
- opspec := tf.OpSpec{
- Type: "Pad",
- Input: []tf.Input{
- input, paddings,
- },
+ var idx int
+ var err error
+ out_example_state_data = op.Output(idx)
+ if out_delta_sparse_weights, idx, err = makeOutputList(op, idx, "out_delta_sparse_weights"); err != nil {
+ scope.UpdateErr("SdcaOptimizer", err)
+ return
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ if out_delta_dense_weights, idx, err = makeOutputList(op, idx, "out_delta_dense_weights"); err != nil {
+ scope.UpdateErr("SdcaOptimizer", err)
+ return
+ }
+ return out_example_state_data, out_delta_sparse_weights, out_delta_dense_weights
}
-// Creates a dataset that emits the lines of one or more text files.
+// SparseMatMulAttr is an optional argument to SparseMatMul.
+type SparseMatMulAttr func(optionalAttr)
+
+// SparseMatMulTransposeA sets the optional transpose_a attribute to value.
+// If not specified, defaults to false
+func SparseMatMulTransposeA(value bool) SparseMatMulAttr {
+ return func(m optionalAttr) {
+ m["transpose_a"] = value
+ }
+}
+
+// SparseMatMulTransposeB sets the optional transpose_b attribute to value.
+// If not specified, defaults to false
+func SparseMatMulTransposeB(value bool) SparseMatMulAttr {
+ return func(m optionalAttr) {
+ m["transpose_b"] = value
+ }
+}
+
+// SparseMatMulAIsSparse sets the optional a_is_sparse attribute to value.
+// If not specified, defaults to false
+func SparseMatMulAIsSparse(value bool) SparseMatMulAttr {
+ return func(m optionalAttr) {
+ m["a_is_sparse"] = value
+ }
+}
+
+// SparseMatMulBIsSparse sets the optional b_is_sparse attribute to value.
+// If not specified, defaults to false
+func SparseMatMulBIsSparse(value bool) SparseMatMulAttr {
+ return func(m optionalAttr) {
+ m["b_is_sparse"] = value
+ }
+}
+
+// Multiply matrix "a" by matrix "b".
//
-// Arguments:
-// filenames: A scalar or a vector containing the name(s) of the file(s) to be
-// read.
-// compression_type: A scalar containing either (i) the empty string (no
-// compression), (ii) "ZLIB", or (iii) "GZIP".
-// buffer_size: A scalar containing the number of bytes to buffer.
-func TextLineDataset(scope *Scope, filenames tf.Output, compression_type tf.Output, buffer_size tf.Output) (handle tf.Output) {
+// The inputs must be two-dimensional matrices and the inner dimension of "a" must
+// match the outer dimension of "b". This op is optimized for the case where at
+// least one of "a" or "b" is sparse. The breakeven for using this versus a dense
+// matrix multiply on one platform was 30% zero values in the sparse matrix.
+//
+// The gradient computation of this operation will only take advantage of sparsity
+// in the input gradient when that gradient comes from a Relu.
+func SparseMatMul(scope *Scope, a tf.Output, b tf.Output, optional ...SparseMatMulAttr) (product tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "TextLineDataset",
+ Type: "SparseMatMul",
Input: []tf.Input{
- filenames, compression_type, buffer_size,
+ a, b,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
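+
+// A usage sketch (editor's illustration): "a" is mostly zeros, so it is
+// flagged as sparse; the product evaluates to [[1, 1], [6, 6]]:
+//
+//	s := op.NewScope()
+//	a := op.Const(s.SubScope("a"), [][]float32{{1, 0, 0}, {0, 0, 2}})
+//	b := op.Const(s.SubScope("b"), [][]float32{{1, 1}, {2, 2}, {3, 3}})
+//	p := op.SparseMatMul(s, a, b, op.SparseMatMulAIsSparse(true))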
-// Returns the number of records this Reader has produced.
+// Computes the power of one value to another.
//
-// This is the same as the number of ReaderRead executions that have
-// succeeded.
+// Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
+// corresponding elements in `x` and `y`. For example:
//
-// Arguments:
-// reader_handle: Handle to a Reader.
-func ReaderNumRecordsProducedV2(scope *Scope, reader_handle tf.Output) (records_produced tf.Output) {
+// ```
+// # tensor 'x' is [[2, 2], [3, 3]]
+// # tensor 'y' is [[8, 16], [2, 3]]
+// tf.pow(x, y) ==> [[256, 65536], [9, 27]]
+// ```
+func Pow(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "ReaderNumRecordsProducedV2",
+ Type: "Pow",
Input: []tf.Input{
- reader_handle,
+ x, y,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
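+
+// A usage sketch (editor's illustration) mirroring the example above:
+//
+//	s := op.NewScope()
+//	x := op.Const(s.SubScope("x"), [][]float32{{2, 2}, {3, 3}})
+//	y := op.Const(s.SubScope("y"), [][]float32{{8, 16}, {2, 3}})
+//	z := op.Pow(s, x, y) // [[256, 65536], [9, 27]]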
-// Computes exponential of x - 1 element-wise.
+// ShapeAttr is an optional argument to Shape.
+type ShapeAttr func(optionalAttr)
+
+// ShapeOutType sets the optional out_type attribute to value.
+// If not specified, defaults to DT_INT32
+func ShapeOutType(value tf.DataType) ShapeAttr {
+ return func(m optionalAttr) {
+ m["out_type"] = value
+ }
+}
+
+// Returns the shape of a tensor.
//
-// I.e., \\(y = (\exp x) - 1\\).
-func Expm1(scope *Scope, x tf.Output) (y tf.Output) {
+// This operation returns a 1-D integer tensor representing the shape of `input`.
+//
+// For example:
+//
+// ```
+// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
+// shape(t) ==> [2, 2, 3]
+// ```
+func Shape(scope *Scope, input tf.Output, optional ...ShapeAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "Expm1",
+ Type: "Shape",
Input: []tf.Input{
- x,
+ input,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
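+
+// A usage sketch (editor's illustration): the shape of a 2x3 matrix, emitted
+// as int64 via the optional out_type attribute:
+//
+//	s := op.NewScope()
+//	t := op.Const(s, [][]float32{{1, 1, 1}, {2, 2, 2}})
+//	sh := op.Shape(s, t, op.ShapeOutType(tf.Int64)) // [2, 3]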
-// Batch normalization.
-//
-// DEPRECATED at GraphDef version 9: Use tf.nn.batch_normalization()
-//
-// This op is deprecated. Prefer `tf.nn.batch_normalization`.
+// Computes fingerprints of the input strings.
//
// Arguments:
-// t: A 4D input Tensor.
-// m: A 1D mean Tensor with size matching the last dimension of t.
-// This is the first output from tf.nn.moments,
-// or a saved moving average thereof.
-// v: A 1D variance Tensor with size matching the last dimension of t.
-// This is the second output from tf.nn.moments,
-// or a saved moving average thereof.
-// beta: A 1D beta Tensor with size matching the last dimension of t.
-// An offset to be added to the normalized tensor.
-// gamma: A 1D gamma Tensor with size matching the last dimension of t.
-// If "scale_after_normalization" is true, this tensor will be multiplied
-// with the normalized tensor.
-// variance_epsilon: A small float number to avoid dividing by 0.
-// scale_after_normalization: A bool indicating whether the resulted tensor
-// needs to be multiplied with gamma.
-func BatchNormWithGlobalNormalization(scope *Scope, t tf.Output, m tf.Output, v tf.Output, beta tf.Output, gamma tf.Output, variance_epsilon float32, scale_after_normalization bool) (result tf.Output) {
+// input: vector of strings to compute fingerprints on.
+//
+// Returns a (N,2) shaped matrix where N is the number of elements in the input
+// vector. Each row contains the low and high parts of the fingerprint.
+func SdcaFprint(scope *Scope, input tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"variance_epsilon": variance_epsilon, "scale_after_normalization": scale_after_normalization}
opspec := tf.OpSpec{
- Type: "BatchNormWithGlobalNormalization",
+ Type: "SdcaFprint",
Input: []tf.Input{
- t, m, v, beta, gamma,
+ input,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
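+
+// A usage sketch (editor's illustration): fingerprinting two example IDs
+// yields a [2, 2] matrix holding the low and high fingerprint halves:
+//
+//	s := op.NewScope()
+//	in := op.Const(s, []string{"example-0", "example-1"})
+//	fp := op.SdcaFprint(s, in)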
-// MaxPoolV2Attr is an optional argument to MaxPoolV2.
-type MaxPoolV2Attr func(optionalAttr)
+// RandomPoissonV2Attr is an optional argument to RandomPoissonV2.
+type RandomPoissonV2Attr func(optionalAttr)
-// MaxPoolV2DataFormat sets the optional data_format attribute to value.
+// RandomPoissonV2Seed sets the optional seed attribute to value.
//
-// value: Specify the data format of the input and output data. With the
-// default format "NHWC", the data is stored in the order of:
-// [batch, in_height, in_width, in_channels].
-// Alternatively, the format could be "NCHW", the data storage order of:
-// [batch, in_channels, in_height, in_width].
-// If not specified, defaults to "NHWC"
-func MaxPoolV2DataFormat(value string) MaxPoolV2Attr {
+// value: If either `seed` or `seed2` are set to be non-zero, the random number
+// generator is seeded by the given seed. Otherwise, it is seeded by a
+// random seed.
+// If not specified, defaults to 0
+func RandomPoissonV2Seed(value int64) RandomPoissonV2Attr {
return func(m optionalAttr) {
- m["data_format"] = value
+ m["seed"] = value
}
}
-// Performs max pooling on the input.
+// RandomPoissonV2Seed2 sets the optional seed2 attribute to value.
+//
+// value: A second seed to avoid seed collision.
+// If not specified, defaults to 0
+func RandomPoissonV2Seed2(value int64) RandomPoissonV2Attr {
+ return func(m optionalAttr) {
+ m["seed2"] = value
+ }
+}
+
+// RandomPoissonV2Dtype sets the optional dtype attribute to value.
+// If not specified, defaults to DT_INT64
+func RandomPoissonV2Dtype(value tf.DataType) RandomPoissonV2Attr {
+ return func(m optionalAttr) {
+ m["dtype"] = value
+ }
+}
+
+// Outputs random values from the Poisson distribution(s) described by rate.
+//
+// This op uses two algorithms, depending on rate. If rate >= 10, then
+// the algorithm by Hormann is used to acquire samples via
+// transformation-rejection.
+// See http://www.sciencedirect.com/science/article/pii/0167668793909974.
+//
+// Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform
+// random variables.
+// See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer
+// Programming, Volume 2. Addison Wesley
//
// Arguments:
-// input: 4-D input to pool over.
-// ksize: The size of the window for each dimension of the input tensor.
-// strides: The stride of the sliding window for each dimension of the
-// input tensor.
-// padding: The type of padding algorithm to use.
+// shape: 1-D integer tensor. Shape of independent samples to draw from each
+// distribution described by the shape parameters given in rate.
+// rate: A tensor in which each scalar is a "rate" parameter describing the
+// associated Poisson distribution.
//
-// Returns The max pooled output tensor.
-func MaxPoolV2(scope *Scope, input tf.Output, ksize tf.Output, strides tf.Output, padding string, optional ...MaxPoolV2Attr) (output tf.Output) {
+// Returns A tensor with shape `shape + shape(rate)`. Each slice
+// `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for
+// `rate[i0, i1, ...iN]`.
+func RandomPoissonV2(scope *Scope, shape tf.Output, rate tf.Output, optional ...RandomPoissonV2Attr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"padding": padding}
+ attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "MaxPoolV2",
+ Type: "RandomPoissonV2",
Input: []tf.Input{
- input, ksize, strides,
+ shape, rate,
},
Attrs: attrs,
}
@@ -13746,43 +13921,59 @@ func MaxPoolV2(scope *Scope, input tf.Output, ksize tf.Output, strides tf.Output
return op.Output(0)
}
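+
+// A usage sketch (editor's illustration): three int64 draws from a
+// Poisson(4) distribution, with a fixed seed for reproducibility:
+//
+//	s := op.NewScope()
+//	shp := op.Const(s.SubScope("shape"), []int32{3})
+//	rate := op.Const(s.SubScope("rate"), float32(4))
+//	samples := op.RandomPoissonV2(s, shp, rate, op.RandomPoissonV2Seed(42))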
-// SparseReduceMaxAttr is an optional argument to SparseReduceMax.
-type SparseReduceMaxAttr func(optionalAttr)
+// MatrixTriangularSolveAttr is an optional argument to MatrixTriangularSolve.
+type MatrixTriangularSolveAttr func(optionalAttr)
-// SparseReduceMaxKeepDims sets the optional keep_dims attribute to value.
+// MatrixTriangularSolveLower sets the optional lower attribute to value.
//
-// value: If true, retain reduced dimensions with length 1.
+// value: Boolean indicating whether the innermost matrices in `matrix` are
+// lower or upper triangular.
+// If not specified, defaults to true
+func MatrixTriangularSolveLower(value bool) MatrixTriangularSolveAttr {
+ return func(m optionalAttr) {
+ m["lower"] = value
+ }
+}
+
+// MatrixTriangularSolveAdjoint sets the optional adjoint attribute to value.
+//
+// value: Boolean indicating whether to solve with `matrix` or its (block-wise)
+// adjoint.
+//
+// @compatibility(numpy)
+// Equivalent to np.linalg.triangular_solve
+// @end_compatibility
// If not specified, defaults to false
-func SparseReduceMaxKeepDims(value bool) SparseReduceMaxAttr {
+func MatrixTriangularSolveAdjoint(value bool) MatrixTriangularSolveAttr {
return func(m optionalAttr) {
- m["keep_dims"] = value
+ m["adjoint"] = value
}
}
-// Computes the max of elements across dimensions of a SparseTensor.
+// Solves systems of linear equations with upper or lower triangular matrices
+// by backsubstitution.
//
-// This Op takes a SparseTensor and is the sparse counterpart to
-// `tf.reduce_max()`. In particular, this Op also returns a dense `Tensor`
-// instead of a sparse one.
//
-// Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
-// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
-// `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
-// with length 1.
+// `matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form
+// square matrices. If `lower` is `True` then the strictly upper triangular part
+// of each inner-most matrix is assumed to be zero and not accessed.
+// If `lower` is False then the strictly lower triangular part of each inner-most
+// matrix is assumed to be zero and not accessed.
+// `rhs` is a tensor of shape `[..., M, K]`.
//
-// If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
-// with a single element is returned. Additionally, the axes can be negative,
-// which are interpreted according to the indexing rules in Python.
+// The output is a tensor of shape `[..., M, K]`. If `adjoint` is
+// `False` then the innermost matrices in `output` satisfy matrix equations
+// `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
+// If `adjoint` is `True` then the innermost matrices in
+// `output` satisfy matrix equations
+// `adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`.
//
// Arguments:
-// input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a
-// SparseTensor, possibly not in canonical ordering.
-// input_values: 1-D. `N` non-empty values corresponding to `input_indices`.
-// input_shape: 1-D. Shape of the input SparseTensor.
-// reduction_axes: 1-D. Length-`K` vector containing the reduction axes.
+// matrix: Shape is `[..., M, M]`.
+// rhs: Shape is `[..., M, K]`.
//
-// Returns `R-K`-D. The reduced Tensor.
-func SparseReduceMax(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output, reduction_axes tf.Output, optional ...SparseReduceMaxAttr) (output tf.Output) {
+// Returns Shape is `[..., M, K]`.
+func MatrixTriangularSolve(scope *Scope, matrix tf.Output, rhs tf.Output, optional ...MatrixTriangularSolveAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
@@ -13791,9 +13982,9 @@ func SparseReduceMax(scope *Scope, input_indices tf.Output, input_values tf.Outp
a(attrs)
}
opspec := tf.OpSpec{
- Type: "SparseReduceMax",
+ Type: "MatrixTriangularSolve",
Input: []tf.Input{
- input_indices, input_values, input_shape, reduction_axes,
+ matrix, rhs,
},
Attrs: attrs,
}
@@ -13801,119 +13992,97 @@ func SparseReduceMax(scope *Scope, input_indices tf.Output, input_values tf.Outp
return op.Output(0)
}
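+
+// A usage sketch (editor's illustration): solving the lower-triangular
+// system m*x = rhs, which yields [[2], [1]]:
+//
+//	s := op.NewScope()
+//	m := op.Const(s.SubScope("m"), [][]float32{{2, 0}, {1, 1}})
+//	rhs := op.Const(s.SubScope("rhs"), [][]float32{{4}, {3}})
+//	x := op.MatrixTriangularSolve(s, m, rhs, op.MatrixTriangularSolveLower(true))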
-// Subtracts a value from the current value of a variable.
-//
-// Any ReadVariableOp which depends directly or indirectly on this assign is
-// guaranteed to see the incremented value or a subsequent newer one.
-//
-// Outputs the incremented value, which can be used to totally order the
-// increments to this variable.
-//
-// Arguments:
-// resource: handle to the resource in which to store the variable.
-// value: the value by which the variable will be incremented.
-//
-// Returns the created operation.
-func AssignSubVariableOp(scope *Scope, resource tf.Output, value tf.Output) (o *tf.Operation) {
+// Computes inverse hyperbolic sine of x element-wise.
+func Asinh(scope *Scope, x tf.Output) (y tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "AssignSubVariableOp",
+ Type: "Asinh",
Input: []tf.Input{
- resource, value,
+ x,
},
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// Execute a sub graph on a remote processor.
-//
-// The graph specifications(such as graph itself, input tensors and output names)
-// are stored as a serialized protocol buffer of RemoteFusedGraphExecuteInfo
-// as serialized_remote_fused_graph_execute_info.
-// The specifications will be passed to a dedicated registered
-// remote fused graph executor. The executor will send the graph specifications
-// to a remote processor and execute that graph. The execution results
-// will be passed to consumer nodes as outputs of this node.
+// Creates a dataset with a range of values. Corresponds to Python's xrange.
//
// Arguments:
-// inputs: Arbitrary number of tensors with arbitrary data types
+// start: corresponds to start in Python's xrange().
+// stop: corresponds to stop in Python's xrange().
+// step: corresponds to step in Python's xrange().
//
-// serialized_remote_fused_graph_execute_info: Serialized protocol buffer
-// of RemoteFusedGraphExecuteInfo which contains graph specifications.
//
-// Returns Arbitrary number of tensors with arbitrary data types
-func RemoteFusedGraphExecute(scope *Scope, inputs []tf.Output, Toutputs []tf.DataType, serialized_remote_fused_graph_execute_info string) (outputs []tf.Output) {
+func RangeDataset(scope *Scope, start tf.Output, stop tf.Output, step tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"Toutputs": Toutputs, "serialized_remote_fused_graph_execute_info": serialized_remote_fused_graph_execute_info}
+ attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
opspec := tf.OpSpec{
- Type: "RemoteFusedGraphExecute",
+ Type: "RangeDataset",
Input: []tf.Input{
- tf.OutputList(inputs),
+ start, stop, step,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- if scope.Err() != nil {
- return
- }
- var idx int
- var err error
- if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
- scope.UpdateErr("RemoteFusedGraphExecute", err)
- return
- }
- return outputs
+ return op.Output(0)
}
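+
+// A usage sketch (editor's illustration): a dataset of the even values
+// 0, 2, 4, 6, 8 as scalar int64 elements:
+//
+//	s := op.NewScope()
+//	start := op.Const(s.SubScope("start"), int64(0))
+//	stop := op.Const(s.SubScope("stop"), int64(10))
+//	step := op.Const(s.SubScope("step"), int64(2))
+//	ds := op.RangeDataset(s, start, stop, step,
+//		[]tf.DataType{tf.Int64}, []tf.Shape{tf.ScalarShape()})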
-// Conv3DBackpropFilterV2Attr is an optional argument to Conv3DBackpropFilterV2.
-type Conv3DBackpropFilterV2Attr func(optionalAttr)
+// DepthwiseConv2dNativeBackpropInputAttr is an optional argument to DepthwiseConv2dNativeBackpropInput.
+type DepthwiseConv2dNativeBackpropInputAttr func(optionalAttr)
-// Conv3DBackpropFilterV2DataFormat sets the optional data_format attribute to value.
+// DepthwiseConv2dNativeBackpropInputDataFormat sets the optional data_format attribute to value.
//
-// value: The data format of the input and output data. With the
-// default format "NDHWC", the data is stored in the order of:
-// [batch, in_depth, in_height, in_width, in_channels].
-// Alternatively, the format could be "NCDHW", the data storage order is:
-// [batch, in_channels, in_depth, in_height, in_width].
-// If not specified, defaults to "NDHWC"
-func Conv3DBackpropFilterV2DataFormat(value string) Conv3DBackpropFilterV2Attr {
+// value: Specify the data format of the input and output data. With the
+// default format "NHWC", the data is stored in the order of:
+// [batch, height, width, channels].
+// Alternatively, the format could be "NCHW", the data storage order of:
+// [batch, channels, height, width].
+// If not specified, defaults to "NHWC"
+func DepthwiseConv2dNativeBackpropInputDataFormat(value string) DepthwiseConv2dNativeBackpropInputAttr {
return func(m optionalAttr) {
m["data_format"] = value
}
}
-// Conv3DBackpropFilterV2Dilations sets the optional dilations attribute to value.
+// DepthwiseConv2dNativeBackpropInputDilations sets the optional dilations attribute to value.
//
-// value: 1-D tensor of length 5. The dilation factor for each dimension of
-// `input`. If set to k > 1, there will be k-1 skipped cells between each
-// filter element on that dimension. The dimension order is determined by the
-// value of `data_format`, see above for details. Dilations in the batch and
-// depth dimensions must be 1.
-// If not specified, defaults to <i:1 i:1 i:1 i:1 i:1 >
-func Conv3DBackpropFilterV2Dilations(value []int64) Conv3DBackpropFilterV2Attr {
+// value: 1-D tensor of length 4. The dilation factor for each dimension of
+// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
+// element on that dimension. The dimension order is determined by the value of
+// `data_format`, see above for details. Dilations in the batch and depth
+// dimensions must be 1.
+// If not specified, defaults to <i:1 i:1 i:1 i:1 >
+func DepthwiseConv2dNativeBackpropInputDilations(value []int64) DepthwiseConv2dNativeBackpropInputAttr {
return func(m optionalAttr) {
m["dilations"] = value
}
}
-// Computes the gradients of 3-D convolution with respect to the filter.
+// Computes the gradients of depthwise convolution with respect to the input.
//
// Arguments:
-// input: Shape `[batch, depth, rows, cols, in_channels]`.
-// filter_sizes: An integer vector representing the tensor shape of `filter`,
-// where `filter` is a 5-D
-// `[filter_depth, filter_height, filter_width, in_channels, out_channels]`
-// tensor.
-// out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
-// out_channels]`.
-// strides: 1-D tensor of length 5. The stride of the sliding window for each
-// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
+// input_sizes: An integer vector representing the shape of `input`, based
+// on `data_format`. For example, if `data_format` is 'NHWC' then
+// `input` is a 4-D `[batch, height, width, channels]` tensor.
+// filter: 4-D with shape
+// `[filter_height, filter_width, in_channels, depthwise_multiplier]`.
+// out_backprop: 4-D with shape based on `data_format`.
+// For example, if `data_format` is 'NHWC' then
+// out_backprop shape is `[batch, out_height, out_width, out_channels]`.
+// Gradients w.r.t. the output of the convolution.
+// strides: The stride of the sliding window for each dimension of the input
+// of the convolution.
// padding: The type of padding algorithm to use.
-func Conv3DBackpropFilterV2(scope *Scope, input tf.Output, filter_sizes tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv3DBackpropFilterV2Attr) (output tf.Output) {
+//
+// Returns 4-D with shape according to `data_format`. For example, if
+// `data_format` is 'NHWC', output shape is `[batch, in_height,
+// in_width, in_channels]`. Gradient w.r.t. the input of the
+// convolution.
+func DepthwiseConv2dNativeBackpropInput(scope *Scope, input_sizes tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...DepthwiseConv2dNativeBackpropInputAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
@@ -13922,9 +14091,9 @@ func Conv3DBackpropFilterV2(scope *Scope, input tf.Output, filter_sizes tf.Outpu
a(attrs)
}
opspec := tf.OpSpec{
- Type: "Conv3DBackpropFilterV2",
+ Type: "DepthwiseConv2dNativeBackpropInput",
Input: []tf.Input{
- input, filter_sizes, out_backprop,
+ input_sizes, filter, out_backprop,
},
Attrs: attrs,
}
@@ -13932,110 +14101,142 @@ func Conv3DBackpropFilterV2(scope *Scope, input tf.Output, filter_sizes tf.Outpu
return op.Output(0)
}
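+
+// exampleDepthwiseBackpropInput is an illustrative sketch, not generated
+// code: the filter and out_backprop placeholders stand in for real tensors,
+// and the input_sizes constant is the NHWC shape of the forward-pass input.
+func exampleDepthwiseBackpropInput(s *Scope) tf.Output {
+	inputSizes := Const(s, []int32{1, 28, 28, 8})
+	filter := Placeholder(s, tf.Float)
+	outBackprop := Placeholder(s, tf.Float)
+	return DepthwiseConv2dNativeBackpropInput(s, inputSizes, filter, outBackprop,
+		[]int64{1, 1, 1, 1}, "SAME",
+		DepthwiseConv2dNativeBackpropInputDataFormat("NHWC"))
+}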
-// OrderedMapUnstageNoKeyAttr is an optional argument to OrderedMapUnstageNoKey.
-type OrderedMapUnstageNoKeyAttr func(optionalAttr)
-
-// OrderedMapUnstageNoKeyCapacity sets the optional capacity attribute to value.
-// If not specified, defaults to 0
+// Adds sparse updates to the variable referenced by `resource`.
//
-// REQUIRES: value >= 0
-func OrderedMapUnstageNoKeyCapacity(value int64) OrderedMapUnstageNoKeyAttr {
- return func(m optionalAttr) {
- m["capacity"] = value
- }
-}
-
-// OrderedMapUnstageNoKeyMemoryLimit sets the optional memory_limit attribute to value.
-// If not specified, defaults to 0
+// This operation computes
//
-// REQUIRES: value >= 0
-func OrderedMapUnstageNoKeyMemoryLimit(value int64) OrderedMapUnstageNoKeyAttr {
- return func(m optionalAttr) {
- m["memory_limit"] = value
- }
-}
-
-// OrderedMapUnstageNoKeyContainer sets the optional container attribute to value.
-// If not specified, defaults to ""
-func OrderedMapUnstageNoKeyContainer(value string) OrderedMapUnstageNoKeyAttr {
- return func(m optionalAttr) {
- m["container"] = value
+// # Scalar indices
+// ref[indices, ...] += updates[...]
+//
+// # Vector indices (for each i)
+// ref[indices[i], ...] += updates[i, ...]
+//
+// # High rank indices (for each i, ..., j)
+// ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
+//
+// Duplicate entries are handled correctly: if multiple `indices` reference
+// the same location, their contributions add.
+//
+// Requires `updates.shape = indices.shape + ref.shape[1:]`.
+//
+// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
+// </div>
+//
+// Arguments:
+// resource: Should be from a `Variable` node.
+// indices: A tensor of indices into the first dimension of `ref`.
+// updates: A tensor of updated values to add to `ref`.
+//
+// Returns the created operation.
+func ResourceScatterAdd(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
+ if scope.Err() != nil {
+ return
}
-}
-
-// OrderedMapUnstageNoKeySharedName sets the optional shared_name attribute to value.
-// If not specified, defaults to ""
-func OrderedMapUnstageNoKeySharedName(value string) OrderedMapUnstageNoKeyAttr {
- return func(m optionalAttr) {
- m["shared_name"] = value
+ opspec := tf.OpSpec{
+ Type: "ResourceScatterAdd",
+ Input: []tf.Input{
+ resource, indices, updates,
+ },
}
+ return scope.AddOperation(opspec)
}
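+
+// exampleResourceScatterAdd is an illustrative sketch, not generated code:
+// it assumes `variable` is a float32 resource handle (e.g. from VarHandleOp)
+// with at least three rows of width two, and adds updates to rows 0 and 2.
+func exampleResourceScatterAdd(s *Scope, variable tf.Output) *tf.Operation {
+	indices := Const(s, []int32{0, 2})
+	updates := Const(s, [][]float32{{1, 1}, {2, 2}})
+	return ResourceScatterAdd(s, variable, indices, updates)
+}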
-// Op removes and returns the (key, value) element with the smallest
+// Computes the gradient for the inverse of `x` wrt its input.
//
-// key from the underlying container. If the underlying container
-// does not contain elements, the op will block until it does.
-func OrderedMapUnstageNoKey(scope *Scope, indices tf.Output, dtypes []tf.DataType, optional ...OrderedMapUnstageNoKeyAttr) (key tf.Output, values []tf.Output) {
+// Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`
+// is the corresponding input gradient.
+func ReciprocalGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dtypes": dtypes}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "OrderedMapUnstageNoKey",
+ Type: "ReciprocalGrad",
Input: []tf.Input{
- indices,
+ y, dy,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
+// Returns the min of x and y (i.e. x < y ? x : y) element-wise.
+//
+// *NOTE*: `Minimum` supports broadcasting. More about broadcasting
+// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+func Minimum(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
if scope.Err() != nil {
return
}
- var idx int
- var err error
- key = op.Output(idx)
- if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
- scope.UpdateErr("OrderedMapUnstageNoKey", err)
- return
+ opspec := tf.OpSpec{
+ Type: "Minimum",
+ Input: []tf.Input{
+ x, y,
+ },
}
- return key, values
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// DataFormatVecPermuteAttr is an optional argument to DataFormatVecPermute.
-type DataFormatVecPermuteAttr func(optionalAttr)
+// MfccAttr is an optional argument to Mfcc.
+type MfccAttr func(optionalAttr)
-// DataFormatVecPermuteSrcFormat sets the optional src_format attribute to value.
+// MfccUpperFrequencyLimit sets the optional upper_frequency_limit attribute to value.
//
-// value: source data format.
-// If not specified, defaults to "NHWC"
-func DataFormatVecPermuteSrcFormat(value string) DataFormatVecPermuteAttr {
+// value: The highest frequency to use when calculating the
+// cepstrum.
+// If not specified, defaults to 4000
+func MfccUpperFrequencyLimit(value float32) MfccAttr {
return func(m optionalAttr) {
- m["src_format"] = value
+ m["upper_frequency_limit"] = value
}
}
-// DataFormatVecPermuteDstFormat sets the optional dst_format attribute to value.
+// MfccLowerFrequencyLimit sets the optional lower_frequency_limit attribute to value.
//
-// value: destination data format.
-// If not specified, defaults to "NCHW"
-func DataFormatVecPermuteDstFormat(value string) DataFormatVecPermuteAttr {
+// value: The lowest frequency to use when calculating the
+// cepstrum.
+// If not specified, defaults to 20
+func MfccLowerFrequencyLimit(value float32) MfccAttr {
return func(m optionalAttr) {
- m["dst_format"] = value
+ m["lower_frequency_limit"] = value
}
}
-// Returns the permuted vector/tensor in the destination data format given the
+// MfccFilterbankChannelCount sets the optional filterbank_channel_count attribute to value.
//
-// one in the source data format.
+// value: Resolution of the Mel bank used internally.
+// If not specified, defaults to 40
+func MfccFilterbankChannelCount(value int64) MfccAttr {
+ return func(m optionalAttr) {
+ m["filterbank_channel_count"] = value
+ }
+}
+
+// MfccDctCoefficientCount sets the optional dct_coefficient_count attribute to value.
//
-// Arguments:
-// x: Vector of size 4 or Tensor of shape (4, 2) in source data format.
+// value: How many output channels to produce per time slice.
+// If not specified, defaults to 13
+func MfccDctCoefficientCount(value int64) MfccAttr {
+ return func(m optionalAttr) {
+ m["dct_coefficient_count"] = value
+ }
+}
+
+// Transforms a spectrogram into a form that's useful for speech recognition.
//
-// Returns Vector of size 4 or Tensor of shape (4, 2) in destination data format.
-func DataFormatVecPermute(scope *Scope, x tf.Output, optional ...DataFormatVecPermuteAttr) (y tf.Output) {
+// Mel Frequency Cepstral Coefficients are a representation of audio data that
+// has proven effective as an input feature for machine learning. They are created by
+// taking the spectrum of a spectrogram (a 'cepstrum'), and discarding some of the
+// higher frequencies that are less significant to the human ear. They have a long
+// history in the speech recognition world, and https://en.wikipedia.org/wiki/Mel-frequency_cepstrum
+// is a good resource to learn more.
+//
+// Arguments:
+// spectrogram: Typically produced by the Spectrogram op, with magnitude_squared
+// set to true.
+// sample_rate: How many samples per second the source audio uses.
+func Mfcc(scope *Scope, spectrogram tf.Output, sample_rate tf.Output, optional ...MfccAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
@@ -14044,9 +14245,9 @@ func DataFormatVecPermute(scope *Scope, x tf.Output, optional ...DataFormatVecPe
a(attrs)
}
opspec := tf.OpSpec{
- Type: "DataFormatVecPermute",
+ Type: "Mfcc",
Input: []tf.Input{
- x,
+ spectrogram, sample_rate,
},
Attrs: attrs,
}
@@ -14054,24 +14255,29 @@ func DataFormatVecPermute(scope *Scope, x tf.Output, optional ...DataFormatVecPe
return op.Output(0)
}
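+
+// exampleMfcc is an illustrative sketch, not generated code: it assumes
+// `spectrogram` was produced by the Spectrogram op with magnitude_squared
+// set to true, and overrides the default filterbank resolution.
+func exampleMfcc(s *Scope, spectrogram tf.Output) tf.Output {
+	sampleRate := Const(s, int32(16000))
+	return Mfcc(s, spectrogram, sampleRate, MfccFilterbankChannelCount(26))
+}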
-// Read an element from the TensorArray into output `value`.
+// Returns the element-wise sum of a list of tensors.
//
-// Arguments:
-// handle: The handle to a TensorArray.
+// `tf.accumulate_n_v2` performs the same operation as `tf.add_n`, but does not
+// wait for all of its inputs to be ready before beginning to sum. This can
+// save memory if inputs are ready at different times, since minimum temporary
+// storage is proportional to the output size rather than the inputs size.
//
-// flow_in: A float scalar that enforces proper chaining of operations.
-// dtype: The type of the elem that is returned.
+// Unlike the original `accumulate_n`, `accumulate_n_v2` is differentiable.
//
-// Returns The tensor that is read from the TensorArray.
-func TensorArrayReadV3(scope *Scope, handle tf.Output, index tf.Output, flow_in tf.Output, dtype tf.DataType) (value tf.Output) {
+// Returns a `Tensor` of same shape and type as the elements of `inputs`.
+//
+// Arguments:
+// inputs: A list of `Tensor` objects, each with same shape and type.
+// shape: Shape of elements of `inputs`.
+func AccumulateNV2(scope *Scope, inputs []tf.Output, shape tf.Shape) (sum tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dtype": dtype}
+ attrs := map[string]interface{}{"shape": shape}
opspec := tf.OpSpec{
- Type: "TensorArrayReadV3",
+ Type: "AccumulateNV2",
Input: []tf.Input{
- handle, index, flow_in,
+ tf.OutputList(inputs),
},
Attrs: attrs,
}
@@ -14079,97 +14285,106 @@ func TensorArrayReadV3(scope *Scope, handle tf.Output, index tf.Output, flow_in
return op.Output(0)
}
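+
+// exampleAccumulateNV2 is an illustrative sketch, not generated code: three
+// same-shaped vectors are summed, yielding {111, 222}; the shape attr must
+// match the element shape.
+func exampleAccumulateNV2(s *Scope) tf.Output {
+	a := Const(s, []float32{1, 2})
+	b := Const(s, []float32{10, 20})
+	c := Const(s, []float32{100, 200})
+	return AccumulateNV2(s, []tf.Output{a, b, c}, tf.MakeShape(2))
+}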
-// Adds up a SparseTensor and a dense Tensor, using these special rules:
+// Convert the quantized 'input' tensor into a lower-precision 'output', using the
//
-// (1) Broadcasts the dense side to have the same shape as the sparse side, if
-// eligible;
-// (2) Then, only the dense values pointed to by the indices of the SparseTensor
-// participate in the cwise addition.
+// actual distribution of the values to maximize the usage of the lower bit depth
+// and adjusting the output min and max ranges accordingly.
//
-// By these rules, the result is a logical SparseTensor with exactly the same
-// indices and shape, but possibly with different non-zero values. The output of
-// this Op is the resultant non-zero values.
+// [input_min, input_max] are scalar floats that specify the range for the float
+// interpretation of the 'input' data. For example, if input_min is -1.0f and
+// input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0
+// value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.
+//
+// This operator tries to squeeze as much precision as possible into an output with
+// a lower bit depth by calculating the actual min and max values found in the
+// data. For example, maybe that quint16 input has no values lower than 16,384 and
+// none higher than 49,152. That means only half the range is actually needed, all
+// the float interpretations are between -0.5f and 0.5f, so if we want to compress
+// the data into a quint8 output, we can use that range rather than the theoretical
+// -1.0f to 1.0f that is suggested by the input min and max.
+//
+// In practice, this is most useful for taking output from operations like
+// QuantizedMatMul that can produce higher bit-depth outputs than their inputs and
+// may have large potential output ranges, but in practice have a distribution of
+// input values that only uses a small fraction of the possible range. By feeding
+// that output into this operator, we can reduce it from 32 bits down to 8 with
+// minimal loss of accuracy.
//
// Arguments:
-// sp_indices: 2-D. `N x R` matrix with the indices of non-empty values in a
-// SparseTensor, possibly not in canonical ordering.
-// sp_values: 1-D. `N` non-empty values corresponding to `sp_indices`.
-// sp_shape: 1-D. Shape of the input SparseTensor.
-// dense: `R`-D. The dense Tensor operand.
//
-// Returns 1-D. The `N` values that are operated on.
-func SparseDenseCwiseAdd(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output, dense tf.Output) (output tf.Output) {
+// input_min: The float value that the minimum quantized input value represents.
+// input_max: The float value that the maximum quantized input value represents.
+// out_type: The type of the output. Should be a lower bit depth than Tinput.
+//
+// Returns The float value that the minimum quantized output value represents. The float value that the maximum quantized output value represents.
+func QuantizeDownAndShrinkRange(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, out_type tf.DataType) (output tf.Output, output_min tf.Output, output_max tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"out_type": out_type}
opspec := tf.OpSpec{
- Type: "SparseDenseCwiseAdd",
+ Type: "QuantizeDownAndShrinkRange",
Input: []tf.Input{
- sp_indices, sp_values, sp_shape, dense,
+ input, input_min, input_max,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1), op.Output(2)
}
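+
+// exampleRequantize is an illustrative sketch, not generated code: it assumes
+// acc, accMin and accMax come from a higher bit-depth op such as
+// QuantizedMatMul, and shrinks the result down to quint8.
+func exampleRequantize(s *Scope, acc, accMin, accMax tf.Output) (tf.Output, tf.Output, tf.Output) {
+	return QuantizeDownAndShrinkRange(s, acc, accMin, accMax, tf.Quint8)
+}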
-// Conv3DAttr is an optional argument to Conv3D.
-type Conv3DAttr func(optionalAttr)
+// RandomGammaAttr is an optional argument to RandomGamma.
+type RandomGammaAttr func(optionalAttr)
-// Conv3DDataFormat sets the optional data_format attribute to value.
+// RandomGammaSeed sets the optional seed attribute to value.
//
-// value: The data format of the input and output data. With the
-// default format "NDHWC", the data is stored in the order of:
-// [batch, in_depth, in_height, in_width, in_channels].
-// Alternatively, the format could be "NCDHW", the data storage order is:
-// [batch, in_channels, in_depth, in_height, in_width].
-// If not specified, defaults to "NDHWC"
-func Conv3DDataFormat(value string) Conv3DAttr {
+// value: If either `seed` or `seed2` are set to be non-zero, the random number
+// generator is seeded by the given seed. Otherwise, it is seeded by a
+// random seed.
+// If not specified, defaults to 0
+func RandomGammaSeed(value int64) RandomGammaAttr {
return func(m optionalAttr) {
- m["data_format"] = value
+ m["seed"] = value
}
}
-// Conv3DDilations sets the optional dilations attribute to value.
+// RandomGammaSeed2 sets the optional seed2 attribute to value.
//
-// value: 1-D tensor of length 5. The dilation factor for each dimension of
-// `input`. If set to k > 1, there will be k-1 skipped cells between each
-// filter element on that dimension. The dimension order is determined by the
-// value of `data_format`, see above for details. Dilations in the batch and
-// depth dimensions must be 1.
-// If not specified, defaults to <i:1 i:1 i:1 i:1 i:1 >
-func Conv3DDilations(value []int64) Conv3DAttr {
+// value: A second seed to avoid seed collision.
+// If not specified, defaults to 0
+func RandomGammaSeed2(value int64) RandomGammaAttr {
return func(m optionalAttr) {
- m["dilations"] = value
+ m["seed2"] = value
}
}
-// Computes a 3-D convolution given 5-D `input` and `filter` tensors.
-//
-// In signal processing, cross-correlation is a measure of similarity of
-// two waveforms as a function of a time-lag applied to one of them. This
-// is also known as a sliding dot product or sliding inner-product.
+// Outputs random values from the Gamma distribution(s) described by alpha.
//
-// Our Conv3D implements a form of cross-correlation.
+// This op uses the algorithm by Marsaglia et al. to acquire samples via
+// transformation-rejection from pairs of uniform and normal random variables.
+// See http://dl.acm.org/citation.cfm?id=358414
//
// Arguments:
-// input: Shape `[batch, in_depth, in_height, in_width, in_channels]`.
-// filter: Shape `[filter_depth, filter_height, filter_width, in_channels,
-// out_channels]`. `in_channels` must match between `input` and `filter`.
-// strides: 1-D tensor of length 5. The stride of the sliding window for each
-// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
-// padding: The type of padding algorithm to use.
-func Conv3D(scope *Scope, input tf.Output, filter tf.Output, strides []int64, padding string, optional ...Conv3DAttr) (output tf.Output) {
+// shape: 1-D integer tensor. Shape of independent samples to draw from each
+// distribution described by the shape parameters given in alpha.
+// alpha: A tensor in which each scalar is a "shape" parameter describing the
+// associated gamma distribution.
+//
+// Returns A tensor with shape `shape + shape(alpha)`. Each slice
+// `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for
+// `alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha.
+func RandomGamma(scope *Scope, shape tf.Output, alpha tf.Output, optional ...RandomGammaAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"strides": strides, "padding": padding}
+ attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "Conv3D",
+ Type: "RandomGamma",
Input: []tf.Input{
- input, filter,
+ shape, alpha,
},
Attrs: attrs,
}
@@ -14177,301 +14392,263 @@ func Conv3D(scope *Scope, input tf.Output, filter tf.Output, strides []int64, pa
return op.Output(0)
}
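+
+// exampleRandomGamma is an illustrative sketch, not generated code: it draws
+// 10 samples from each of two Gamma distributions with a fixed seed, so the
+// output has shape [10, 2].
+func exampleRandomGamma(s *Scope) tf.Output {
+	shape := Const(s, []int32{10})
+	alpha := Const(s, []float32{0.5, 2.0})
+	return RandomGamma(s, shape, alpha, RandomGammaSeed(42))
+}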
-// Returns the truth value of (x >= y) element-wise.
-//
-// *NOTE*: `GreaterEqual` supports broadcasting. More about broadcasting
-// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-func GreaterEqual(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "GreaterEqual",
- Input: []tf.Input{
- x, y,
- },
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// ResourceApplyMomentumAttr is an optional argument to ResourceApplyMomentum.
-type ResourceApplyMomentumAttr func(optionalAttr)
-
-// ResourceApplyMomentumUseLocking sets the optional use_locking attribute to value.
-//
-// value: If `True`, updating of the var and accum tensors will be protected
-// by a lock; otherwise the behavior is undefined, but may exhibit less
-// contention.
-// If not specified, defaults to false
-func ResourceApplyMomentumUseLocking(value bool) ResourceApplyMomentumAttr {
- return func(m optionalAttr) {
- m["use_locking"] = value
- }
-}
+// AvgPool3DGradAttr is an optional argument to AvgPool3DGrad.
+type AvgPool3DGradAttr func(optionalAttr)
-// ResourceApplyMomentumUseNesterov sets the optional use_nesterov attribute to value.
+// AvgPool3DGradDataFormat sets the optional data_format attribute to value.
//
-// value: If `True`, the tensor passed to compute grad will be
-// var - lr * momentum * accum, so in the end, the var you get is actually
-// var - lr * momentum * accum.
-// If not specified, defaults to false
-func ResourceApplyMomentumUseNesterov(value bool) ResourceApplyMomentumAttr {
+// value: The data format of the input and output data. With the
+// default format "NDHWC", the data is stored in the order of:
+// [batch, in_depth, in_height, in_width, in_channels].
+// Alternatively, the format could be "NCDHW", the data storage order is:
+// [batch, in_channels, in_depth, in_height, in_width].
+// If not specified, defaults to "NDHWC"
+func AvgPool3DGradDataFormat(value string) AvgPool3DGradAttr {
return func(m optionalAttr) {
- m["use_nesterov"] = value
+ m["data_format"] = value
}
}
-// Update '*var' according to the momentum scheme. Set use_nesterov = True if you
-//
-// want to use Nesterov momentum.
-//
-// accum = accum * momentum + grad
-// var -= lr * accum
+// Computes gradients of average pooling function.
//
// Arguments:
-// var_: Should be from a Variable().
-// accum: Should be from a Variable().
-// lr: Scaling factor. Must be a scalar.
-// grad: The gradient.
-// momentum: Momentum. Must be a scalar.
+// orig_input_shape: The original input dimensions.
+// grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.
+// ksize: 1-D tensor of length 5. The size of the window for each dimension of
+// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
+// strides: 1-D tensor of length 5. The stride of the sliding window for each
+// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
+// padding: The type of padding algorithm to use.
//
-// Returns the created operation.
-func ResourceApplyMomentum(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, momentum tf.Output, optional ...ResourceApplyMomentumAttr) (o *tf.Operation) {
+// Returns The backprop for input.
+func AvgPool3DGrad(scope *Scope, orig_input_shape tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPool3DGradAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "ResourceApplyMomentum",
+ Type: "AvgPool3DGrad",
Input: []tf.Input{
- var_, accum, lr, grad, momentum,
+ orig_input_shape, grad,
},
Attrs: attrs,
}
- return scope.AddOperation(opspec)
-}
-
-// Returns element-wise integer closest to x.
-//
-// If the result is midway between two representable values,
-// the even representable is chosen.
-// For example:
-//
-// ```
-// rint(-1.5) ==> -2.0
-// rint(0.5000001) ==> 1.0
-// rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.]
-// ```
-func Rint(scope *Scope, x tf.Output) (y tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "Rint",
- Input: []tf.Input{
- x,
- },
- }
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// QuantizeV2Attr is an optional argument to QuantizeV2.
-type QuantizeV2Attr func(optionalAttr)
+// ParseSingleSequenceExampleAttr is an optional argument to ParseSingleSequenceExample.
+type ParseSingleSequenceExampleAttr func(optionalAttr)
-// QuantizeV2Mode sets the optional mode attribute to value.
-// If not specified, defaults to "MIN_COMBINED"
-func QuantizeV2Mode(value string) QuantizeV2Attr {
+// ParseSingleSequenceExampleContextSparseTypes sets the optional context_sparse_types attribute to value.
+//
+// value: A list of Ncontext_sparse types; the data types of data in
+// each context Feature given in context_sparse_keys.
+// Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),
+// DT_INT64 (Int64List), and DT_STRING (BytesList).
+// If not specified, defaults to <>
+//
+// REQUIRES: len(value) >= 0
+func ParseSingleSequenceExampleContextSparseTypes(value []tf.DataType) ParseSingleSequenceExampleAttr {
return func(m optionalAttr) {
- m["mode"] = value
+ m["context_sparse_types"] = value
}
}
-// QuantizeV2RoundMode sets the optional round_mode attribute to value.
-// If not specified, defaults to "HALF_AWAY_FROM_ZERO"
-func QuantizeV2RoundMode(value string) QuantizeV2Attr {
+// ParseSingleSequenceExampleFeatureListDenseTypes sets the optional feature_list_dense_types attribute to value.
+// If not specified, defaults to <>
+//
+// REQUIRES: len(value) >= 0
+func ParseSingleSequenceExampleFeatureListDenseTypes(value []tf.DataType) ParseSingleSequenceExampleAttr {
return func(m optionalAttr) {
- m["round_mode"] = value
+ m["feature_list_dense_types"] = value
}
}
-// Quantize the 'input' tensor of type float to 'output' tensor of type 'T'.
-//
-// [min_range, max_range] are scalar floats that specify the range for
-// the 'input' data. The 'mode' attribute controls exactly which calculations are
-// used to convert the float values to their quantized equivalents. The
-// 'round_mode' attribute controls which rounding tie-breaking algorithm is used
-// when rounding float values to their quantized equivalents.
-//
-// In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
-//
-// ```
-// out[i] = (in[i] - min_range) * range(T) / (max_range - min_range)
-// if T == qint8, out[i] -= (range(T) + 1) / 2.0
-// ```
-// here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`
-//
-// *MIN_COMBINED Mode Example*
-//
-// Assume the input is type float and has a possible range of [0.0, 6.0] and the
-// output type is quint8 ([0, 255]). The min_range and max_range values should be
-// specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each
-// value of the input by 255/6 and cast to quint8.
-//
-// If the output type was qint8 ([-128, 127]), the operation will additionally
-// subtract each value by 128 prior to casting, so that the range of values aligns
-// with the range of qint8.
-//
-// If the mode is 'MIN_FIRST', then this approach is used:
-//
-// ```
-// num_discrete_values = 1 << (# of bits in T)
-// range_adjust = num_discrete_values / (num_discrete_values - 1)
-// range = (range_max - range_min) * range_adjust
-// range_scale = num_discrete_values / range
-// quantized = round(input * range_scale) - round(range_min * range_scale) +
-// numeric_limits<T>::min()
-// quantized = max(quantized, numeric_limits<T>::min())
-// quantized = min(quantized, numeric_limits<T>::max())
-// ```
-//
-// The biggest difference between this and MIN_COMBINED is that the minimum range
-// is rounded first, before it's subtracted from the rounded value. With
-// MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing
-// and dequantizing will introduce a larger and larger error.
-//
-// *SCALED mode Example*
-//
-// `SCALED` mode matches the quantization approach used in
-// `QuantizeAndDequantize{V2|V3}`.
-//
-// If the mode is `SCALED`, we do not use the full range of the output type,
-// choosing to elide the lowest possible value for symmetry (e.g., output range is
-// -127 to 127, not -128 to 127 for signed 8 bit quantization), so that 0.0 maps to
-// 0.
-//
-// We first find the range of values in our tensor. The
-// range we use is always centered on 0, so we find m such that
-// ```c++
-// m = max(abs(input_min), abs(input_max))
-// ```
+// ParseSingleSequenceExampleContextDenseShapes sets the optional context_dense_shapes attribute to value.
//
-// Our input tensor range is then `[-m, m]`.
+// value: A list of Ncontext_dense shapes; the shapes of data in
+// each context Feature given in context_dense_keys.
+// The number of elements in the Feature corresponding to context_dense_key[j]
+// must always equal context_dense_shapes[j].NumEntries().
+// The shape of context_dense_values[j] will match context_dense_shapes[j].
+// If not specified, defaults to <>
//
-// Next, we choose our fixed-point quantization buckets, `[min_fixed, max_fixed]`.
-// If T is signed, this is
-// ```
-// num_bits = sizeof(T) * 8
-// [min_fixed, max_fixed] =
-// [-(1 << (num_bits - 1) - 1), (1 << (num_bits - 1)) - 1]
-// ```
+// REQUIRES: len(value) >= 0
+func ParseSingleSequenceExampleContextDenseShapes(value []tf.Shape) ParseSingleSequenceExampleAttr {
+ return func(m optionalAttr) {
+ m["context_dense_shapes"] = value
+ }
+}
+
+// ParseSingleSequenceExampleFeatureListSparseTypes sets the optional feature_list_sparse_types attribute to value.
//
-// Otherwise, if T is unsigned, the fixed-point range is
-// ```
-// [min_fixed, max_fixed] = [0, (1 << num_bits) - 1]
-// ```
+// value: A list of Nfeature_list_sparse types; the data types
+// of data in each FeatureList given in feature_list_sparse_keys.
+// Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),
+// DT_INT64 (Int64List), and DT_STRING (BytesList).
+// If not specified, defaults to <>
//
-// From this we compute our scaling factor, s:
-// ```c++
-// s = (max_fixed - min_fixed) / (2 * m)
-// ```
+// REQUIRES: len(value) >= 0
+func ParseSingleSequenceExampleFeatureListSparseTypes(value []tf.DataType) ParseSingleSequenceExampleAttr {
+ return func(m optionalAttr) {
+ m["feature_list_sparse_types"] = value
+ }
+}
+
+// ParseSingleSequenceExampleFeatureListDenseShapes sets the optional feature_list_dense_shapes attribute to value.
//
-// Now we can quantize the elements of our tensor:
-// ```c++
-// result = round(input * s)
-// ```
+// value: A list of Nfeature_list_dense shapes; the shapes of
+// data in each FeatureList given in feature_list_dense_keys.
+// The shape of each Feature in the FeatureList corresponding to
+// feature_list_dense_key[j] must always equal
+// feature_list_dense_shapes[j].NumEntries().
+// If not specified, defaults to <>
//
-// One thing to watch out for is that the operator may choose to adjust the
-// requested minimum and maximum values slightly during the quantization process,
-// so you should always use the output ports as the range for further calculations.
-// For example, if the requested minimum and maximum values are close to equal,
-// they will be separated by a small epsilon value to prevent ill-formed quantized
-// buffers from being created. Otherwise, you can end up with buffers where all the
-// quantized values map to the same float value, which causes problems for
-// operations that have to perform further calculations on them.
+// REQUIRES: len(value) >= 0
+func ParseSingleSequenceExampleFeatureListDenseShapes(value []tf.Shape) ParseSingleSequenceExampleAttr {
+ return func(m optionalAttr) {
+ m["feature_list_dense_shapes"] = value
+ }
+}
+
+// Transforms a scalar brain.SequenceExample proto (as strings) into typed tensors.
//
// Arguments:
-//
-// min_range: The minimum scalar value possibly produced for the input.
-// max_range: The maximum scalar value possibly produced for the input.
-//
-//
-// Returns The quantized data produced from the float input.The actual minimum scalar value used for the output.The actual maximum scalar value used for the output.
-func QuantizeV2(scope *Scope, input tf.Output, min_range tf.Output, max_range tf.Output, T tf.DataType, optional ...QuantizeV2Attr) (output tf.Output, output_min tf.Output, output_max tf.Output) {
+// serialized: A scalar containing a binary serialized SequenceExample proto.
+// feature_list_dense_missing_assumed_empty: A vector listing the
+// FeatureList keys which may be missing from the SequenceExample. If the
+// associated FeatureList is missing, it is treated as empty. By default,
+// any FeatureList not listed in this vector must exist in the SequenceExample.
+// context_sparse_keys: A list of Ncontext_sparse string Tensors (scalars).
+// The keys expected in the Examples' features associated with context_sparse
+// values.
+// context_dense_keys: A list of Ncontext_dense string Tensors (scalars).
+// The keys expected in the SequenceExamples' context features associated with
+// dense values.
+// feature_list_sparse_keys: A list of Nfeature_list_sparse string Tensors
+// (scalars). The keys expected in the FeatureLists associated with sparse
+// values.
+// feature_list_dense_keys: A list of Nfeature_list_dense string Tensors (scalars).
+// The keys expected in the SequenceExamples' feature_lists associated
+// with lists of dense values.
+// context_dense_defaults: A list of Ncontext_dense Tensors (some may be empty).
+// context_dense_defaults[j] provides default values
+// when the SequenceExample's context map lacks context_dense_key[j].
+// If an empty Tensor is provided for context_dense_defaults[j],
+// then the Feature context_dense_keys[j] is required.
+// The input type is inferred from context_dense_defaults[j], even when it's
+// empty. If context_dense_defaults[j] is not empty, its shape must match
+// context_dense_shapes[j].
+// debug_name: A scalar containing the name of the serialized proto.
+// May contain, for example, table key (descriptive) name for the
+// corresponding serialized proto. This is purely useful for debugging
+// purposes, and the presence of values here has no effect on the output.
+// May also be an empty scalar if no name is available.
+func ParseSingleSequenceExample(scope *Scope, serialized tf.Output, feature_list_dense_missing_assumed_empty tf.Output, context_sparse_keys []tf.Output, context_dense_keys []tf.Output, feature_list_sparse_keys []tf.Output, feature_list_dense_keys []tf.Output, context_dense_defaults []tf.Output, debug_name tf.Output, optional ...ParseSingleSequenceExampleAttr) (context_sparse_indices []tf.Output, context_sparse_values []tf.Output, context_sparse_shapes []tf.Output, context_dense_values []tf.Output, feature_list_sparse_indices []tf.Output, feature_list_sparse_values []tf.Output, feature_list_sparse_shapes []tf.Output, feature_list_dense_values []tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"T": T}
+ attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "QuantizeV2",
+ Type: "ParseSingleSequenceExample",
Input: []tf.Input{
- input, min_range, max_range,
+ serialized, feature_list_dense_missing_assumed_empty, tf.OutputList(context_sparse_keys), tf.OutputList(context_dense_keys), tf.OutputList(feature_list_sparse_keys), tf.OutputList(feature_list_dense_keys), tf.OutputList(context_dense_defaults), debug_name,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
+ if scope.Err() != nil {
+ return
+ }
+ var idx int
+ var err error
+ if context_sparse_indices, idx, err = makeOutputList(op, idx, "context_sparse_indices"); err != nil {
+ scope.UpdateErr("ParseSingleSequenceExample", err)
+ return
+ }
+ if context_sparse_values, idx, err = makeOutputList(op, idx, "context_sparse_values"); err != nil {
+ scope.UpdateErr("ParseSingleSequenceExample", err)
+ return
+ }
+ if context_sparse_shapes, idx, err = makeOutputList(op, idx, "context_sparse_shapes"); err != nil {
+ scope.UpdateErr("ParseSingleSequenceExample", err)
+ return
+ }
+ if context_dense_values, idx, err = makeOutputList(op, idx, "context_dense_values"); err != nil {
+ scope.UpdateErr("ParseSingleSequenceExample", err)
+ return
+ }
+ if feature_list_sparse_indices, idx, err = makeOutputList(op, idx, "feature_list_sparse_indices"); err != nil {
+ scope.UpdateErr("ParseSingleSequenceExample", err)
+ return
+ }
+ if feature_list_sparse_values, idx, err = makeOutputList(op, idx, "feature_list_sparse_values"); err != nil {
+ scope.UpdateErr("ParseSingleSequenceExample", err)
+ return
+ }
+ if feature_list_sparse_shapes, idx, err = makeOutputList(op, idx, "feature_list_sparse_shapes"); err != nil {
+ scope.UpdateErr("ParseSingleSequenceExample", err)
+ return
+ }
+ if feature_list_dense_values, idx, err = makeOutputList(op, idx, "feature_list_dense_values"); err != nil {
+ scope.UpdateErr("ParseSingleSequenceExample", err)
+ return
+ }
+ return context_sparse_indices, context_sparse_values, context_sparse_shapes, context_dense_values, feature_list_sparse_indices, feature_list_sparse_values, feature_list_sparse_shapes, feature_list_dense_values
}
-// DepthwiseConv2dNativeBackpropFilterAttr is an optional argument to DepthwiseConv2dNativeBackpropFilter.
-type DepthwiseConv2dNativeBackpropFilterAttr func(optionalAttr)
+// QuantizedConv2DAttr is an optional argument to QuantizedConv2D.
+type QuantizedConv2DAttr func(optionalAttr)
-// DepthwiseConv2dNativeBackpropFilterDataFormat sets the optional data_format attribute to value.
-//
-// value: Specify the data format of the input and output data. With the
-// default format "NHWC", the data is stored in the order of:
-// [batch, height, width, channels].
-// Alternatively, the format could be "NCHW", the data storage order of:
-// [batch, channels, height, width].
-// If not specified, defaults to "NHWC"
-func DepthwiseConv2dNativeBackpropFilterDataFormat(value string) DepthwiseConv2dNativeBackpropFilterAttr {
+// QuantizedConv2DOutType sets the optional out_type attribute to value.
+// If not specified, defaults to DT_QINT32
+func QuantizedConv2DOutType(value tf.DataType) QuantizedConv2DAttr {
return func(m optionalAttr) {
- m["data_format"] = value
+ m["out_type"] = value
}
}
-// DepthwiseConv2dNativeBackpropFilterDilations sets the optional dilations attribute to value.
+// QuantizedConv2DDilations sets the optional dilations attribute to value.
//
// value: 1-D tensor of length 4. The dilation factor for each dimension of
-// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
-// element on that dimension. The dimension order is determined by the value of
-// `data_format`, see above for details. Dilations in the batch and depth
-// dimensions must be 1.
+// `input`. If set to k > 1, there will be k-1 skipped cells between each
+// filter element on that dimension. The dimension order is determined by the
+// value of `data_format`, see above for details. Dilations in the batch and
+// depth dimensions must be 1.
// If not specified, defaults to <i:1 i:1 i:1 i:1 >
-func DepthwiseConv2dNativeBackpropFilterDilations(value []int64) DepthwiseConv2dNativeBackpropFilterAttr {
+func QuantizedConv2DDilations(value []int64) QuantizedConv2DAttr {
return func(m optionalAttr) {
m["dilations"] = value
}
}
-// Computes the gradients of depthwise convolution with respect to the filter.
+// Computes a 2D convolution given quantized 4D input and filter tensors.
+//
+// The inputs are quantized tensors where the lowest quantized value maps to
+// the float value of the associated minimum, and the highest to the associated
+// maximum.
+// This means that you can only interpret the quantized output in the same way, by
+// taking the returned minimum and maximum values into account.
//
// Arguments:
-// input: 4-D with shape based on `data_format`. For example, if
-// `data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height,
-// in_width, in_channels]` tensor.
-// filter_sizes: An integer vector representing the tensor shape of `filter`,
-// where `filter` is a 4-D
-// `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor.
-// out_backprop: 4-D with shape based on `data_format`.
-// For example, if `data_format` is 'NHWC' then
-// out_backprop shape is `[batch, out_height, out_width, out_channels]`.
-// Gradients w.r.t. the output of the convolution.
+//
+// filter: filter's input_depth dimension must match input's depth dimensions.
+// min_input: The float value that the lowest quantized input value represents.
+// max_input: The float value that the highest quantized input value represents.
+// min_filter: The float value that the lowest quantized filter value represents.
+// max_filter: The float value that the highest quantized filter value represents.
// strides: The stride of the sliding window for each dimension of the input
-// of the convolution.
+// tensor.
// padding: The type of padding algorithm to use.
//
-// Returns 4-D with shape
-// `[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t.
-// the `filter` input of the convolution.
-func DepthwiseConv2dNativeBackpropFilter(scope *Scope, input tf.Output, filter_sizes tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...DepthwiseConv2dNativeBackpropFilterAttr) (output tf.Output) {
+// Returns The float value that the lowest quantized output value represents. The float value that the highest quantized output value represents.
+func QuantizedConv2D(scope *Scope, input tf.Output, filter tf.Output, min_input tf.Output, max_input tf.Output, min_filter tf.Output, max_filter tf.Output, strides []int64, padding string, optional ...QuantizedConv2DAttr) (output tf.Output, min_output tf.Output, max_output tf.Output) {
if scope.Err() != nil {
return
}
@@ -14480,119 +14657,124 @@ func DepthwiseConv2dNativeBackpropFilter(scope *Scope, input tf.Output, filter_s
a(attrs)
}
opspec := tf.OpSpec{
- Type: "DepthwiseConv2dNativeBackpropFilter",
+ Type: "QuantizedConv2D",
Input: []tf.Input{
- input, filter_sizes, out_backprop,
+ input, filter, min_input, max_input, min_filter, max_filter,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1), op.Output(2)
}
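+
+// exampleQuantizedConv2D is an illustrative sketch, not generated code: it
+// assumes quint8 input and filter tensors (with their float ranges) produced
+// elsewhere, e.g. by QuantizeV2, and returns the qint32 result and its range.
+func exampleQuantizedConv2D(s *Scope, in, filt, minIn, maxIn, minFilt, maxFilt tf.Output) (tf.Output, tf.Output, tf.Output) {
+	return QuantizedConv2D(s, in, filt, minIn, maxIn, minFilt, maxFilt,
+		[]int64{1, 1, 1, 1}, "SAME")
+}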
-// Shuffle dimensions of x according to a permutation.
+// ResourceGatherAttr is an optional argument to ResourceGather.
+type ResourceGatherAttr func(optionalAttr)
+
+// ResourceGatherValidateIndices sets the optional validate_indices attribute to value.
+// If not specified, defaults to true
+func ResourceGatherValidateIndices(value bool) ResourceGatherAttr {
+ return func(m optionalAttr) {
+ m["validate_indices"] = value
+ }
+}
+
+// Gather slices from the variable pointed to by `resource` according to `indices`.
//
-// The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
-// `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
-func Transpose(scope *Scope, x tf.Output, perm tf.Output) (y tf.Output) {
+// `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
+// Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
+//
+// ```python
+// # Scalar indices
+// output[:, ..., :] = params[indices, :, ... :]
+//
+// # Vector indices
+// output[i, :, ..., :] = params[indices[i], :, ... :]
+//
+// # Higher rank indices
+// output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
+// ```
+func ResourceGather(scope *Scope, resource tf.Output, indices tf.Output, dtype tf.DataType, optional ...ResourceGatherAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"dtype": dtype}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "Transpose",
+ Type: "ResourceGather",
Input: []tf.Input{
- x, perm,
+ resource, indices,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
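+
+// exampleResourceGather is an illustrative sketch, not generated code: it
+// assumes `variable` is a float32 resource handle; the result has shape
+// indices.shape + params.shape[1:], here rows 0 and 2 of the variable.
+func exampleResourceGather(s *Scope, variable tf.Output) tf.Output {
+	indices := Const(s, []int32{0, 2})
+	return ResourceGather(s, variable, indices, tf.Float)
+}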
-// Reads and outputs the entire contents of the input filename.
-func ReadFile(scope *Scope, filename tf.Output) (contents tf.Output) {
+// Delete the TensorArray from its resource container.
+//
+// This enables the user to close and release the resource in the middle
+// of a step/run.
+//
+// Arguments:
+// handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
+//
+// Returns the created operation.
+func TensorArrayCloseV3(scope *Scope, handle tf.Output) (o *tf.Operation) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "ReadFile",
+ Type: "TensorArrayCloseV3",
Input: []tf.Input{
- filename,
+ handle,
},
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
-// AddManySparseToTensorsMapAttr is an optional argument to AddManySparseToTensorsMap.
-type AddManySparseToTensorsMapAttr func(optionalAttr)
-
-// AddManySparseToTensorsMapContainer sets the optional container attribute to value.
-//
-// value: The container name for the `SparseTensorsMap` created by this op.
-// If not specified, defaults to ""
-func AddManySparseToTensorsMapContainer(value string) AddManySparseToTensorsMapAttr {
- return func(m optionalAttr) {
- m["container"] = value
- }
-}
+// MaxPoolGradGradAttr is an optional argument to MaxPoolGradGrad.
+type MaxPoolGradGradAttr func(optionalAttr)
-// AddManySparseToTensorsMapSharedName sets the optional shared_name attribute to value.
+// MaxPoolGradGradDataFormat sets the optional data_format attribute to value.
//
-// value: The shared name for the `SparseTensorsMap` created by this op.
-// If blank, the new Operation's unique name is used.
-// If not specified, defaults to ""
-func AddManySparseToTensorsMapSharedName(value string) AddManySparseToTensorsMapAttr {
+// value: Specify the data format of the input and output data. With the
+// default format "NHWC", the data is stored in the order of:
+// [batch, in_height, in_width, in_channels].
+// Alternatively, the format could be "NCHW", the data storage order of:
+// [batch, in_channels, in_height, in_width].
+// If not specified, defaults to "NHWC"
+func MaxPoolGradGradDataFormat(value string) MaxPoolGradGradAttr {
return func(m optionalAttr) {
- m["shared_name"] = value
+ m["data_format"] = value
}
}
-// Add an `N`-minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles.
-//
-// A `SparseTensor` of rank `R` is represented by three tensors: `sparse_indices`,
-// `sparse_values`, and `sparse_shape`, where
-//
-// ```sparse_indices.shape[1] == sparse_shape.shape[0] == R```
-//
-// An `N`-minibatch of `SparseTensor` objects is represented as a `SparseTensor`
-// having a first `sparse_indices` column taking values between `[0, N)`, where
-// the minibatch size `N == sparse_shape[0]`.
-//
-// The input `SparseTensor` must have rank `R` greater than 1, and the first
-// dimension is treated as the minibatch dimension. Elements of the `SparseTensor`
-// must be sorted in increasing order of this first dimension. The stored
-// `SparseTensor` objects pointed to by each row of the output `sparse_handles`
-// will have rank `R-1`.
-//
-// The `SparseTensor` values can then be read out as part of a minibatch by passing
-// the given keys as vector elements to `TakeManySparseFromTensorsMap`. To ensure
-// the correct `SparseTensorsMap` is accessed, ensure that the same
-// `container` and `shared_name` are passed to that Op. If no `shared_name`
-// is provided here, instead use the *name* of the Operation created by calling
-// `AddManySparseToTensorsMap` as the `shared_name` passed to
-// `TakeManySparseFromTensorsMap`. Ensure the Operations are colocated.
+// Computes second-order gradients of the maxpooling function.
//
// Arguments:
-// sparse_indices: 2-D. The `indices` of the minibatch `SparseTensor`.
-// `sparse_indices[:, 0]` must be ordered values in `[0, N)`.
-// sparse_values: 1-D. The `values` of the minibatch `SparseTensor`.
-// sparse_shape: 1-D. The `shape` of the minibatch `SparseTensor`.
-// The minibatch size `N == sparse_shape[0]`.
+// orig_input: The original input tensor.
+// orig_output: The original output tensor.
+// grad: 4-D. Gradients of gradients w.r.t. the input of `max_pool`.
+// ksize: The size of the window for each dimension of the input tensor.
+// strides: The stride of the sliding window for each dimension of the
+// input tensor.
+// padding: The type of padding algorithm to use.
//
-// Returns 1-D. The handles of the `SparseTensor` now stored in the
-// `SparseTensorsMap`. Shape: `[N]`.
-func AddManySparseToTensorsMap(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output, optional ...AddManySparseToTensorsMapAttr) (sparse_handles tf.Output) {
+// Returns Gradients of gradients w.r.t. the input to `max_pool`.
+func MaxPoolGradGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolGradGradAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "AddManySparseToTensorsMap",
+ Type: "MaxPoolGradGrad",
Input: []tf.Input{
- sparse_indices, sparse_values, sparse_shape,
+ orig_input, orig_output, grad,
},
Attrs: attrs,
}
@@ -14600,65 +14782,48 @@ func AddManySparseToTensorsMap(scope *Scope, sparse_indices tf.Output, sparse_va
return op.Output(0)
}
-// Creates a dataset that emits the outputs of `input_dataset` `count` times.
-//
-// Arguments:
-//
-// count: A scalar representing the number of times that `input_dataset` should
-// be repeated. A value of `-1` indicates that it should be repeated infinitely.
-//
+// RandomUniformIntAttr is an optional argument to RandomUniformInt.
+type RandomUniformIntAttr func(optionalAttr)
+
+// RandomUniformIntSeed sets the optional seed attribute to value.
//
-func RepeatDataset(scope *Scope, input_dataset tf.Output, count tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
- if scope.Err() != nil {
- return
- }
- attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
- opspec := tf.OpSpec{
- Type: "RepeatDataset",
- Input: []tf.Input{
- input_dataset, count,
- },
- Attrs: attrs,
+// value: If either `seed` or `seed2` are set to be non-zero, the random number
+// generator is seeded by the given seed. Otherwise, it is seeded by a
+// random seed.
+// If not specified, defaults to 0
+func RandomUniformIntSeed(value int64) RandomUniformIntAttr {
+ return func(m optionalAttr) {
+ m["seed"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// SparseReduceMaxSparseAttr is an optional argument to SparseReduceMaxSparse.
-type SparseReduceMaxSparseAttr func(optionalAttr)
-
-// SparseReduceMaxSparseKeepDims sets the optional keep_dims attribute to value.
+// RandomUniformIntSeed2 sets the optional seed2 attribute to value.
//
-// value: If true, retain reduced dimensions with length 1.
-// If not specified, defaults to false
-func SparseReduceMaxSparseKeepDims(value bool) SparseReduceMaxSparseAttr {
+// value: A second seed to avoid seed collision.
+// If not specified, defaults to 0
+func RandomUniformIntSeed2(value int64) RandomUniformIntAttr {
return func(m optionalAttr) {
- m["keep_dims"] = value
+ m["seed2"] = value
}
}
-// Computes the max of elements across dimensions of a SparseTensor.
-//
-// This Op takes a SparseTensor and is the sparse counterpart to
-// `tf.reduce_max()`. In contrast to SparseReduceMax, this Op returns a
-// SparseTensor.
+// Outputs random integers from a uniform distribution.
//
-// Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
-// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
-// `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
-// with length 1.
+// The generated values are uniform integers in the range `[minval, maxval)`.
+// The lower bound `minval` is included in the range, while the upper bound
+// `maxval` is excluded.
//
-// If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
-// with a single element is returned. Additionally, the axes can be negative,
-// which are interpreted according to the indexing rules in Python.
+// The random integers are slightly biased unless `maxval - minval` is an exact
+// power of two. The bias is small for values of `maxval - minval` significantly
+// smaller than the range of the output (either `2^32` or `2^64`).
//
// Arguments:
-// input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a
-// SparseTensor, possibly not in canonical ordering.
-// input_values: 1-D. `N` non-empty values corresponding to `input_indices`.
-// input_shape: 1-D. Shape of the input SparseTensor.
-// reduction_axes: 1-D. Length-`K` vector containing the reduction axes.
-func SparseReduceMaxSparse(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output, reduction_axes tf.Output, optional ...SparseReduceMaxSparseAttr) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
+// shape: The shape of the output tensor.
+// minval: 0-D. Inclusive lower bound on the generated integers.
+// maxval: 0-D. Exclusive upper bound on the generated integers.
+//
+// Returns A tensor of the specified shape filled with uniform random integers.
+func RandomUniformInt(scope *Scope, shape tf.Output, minval tf.Output, maxval tf.Output, optional ...RandomUniformIntAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
@@ -14667,96 +14832,97 @@ func SparseReduceMaxSparse(scope *Scope, input_indices tf.Output, input_values t
a(attrs)
}
opspec := tf.OpSpec{
- Type: "SparseReduceMaxSparse",
+ Type: "RandomUniformInt",
Input: []tf.Input{
- input_indices, input_values, input_shape, reduction_axes,
+ shape, minval, maxval,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
+ return op.Output(0)
}
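+
+// exampleRandomUniformInt is an illustrative sketch, not generated code:
+// five integers are drawn from [0, 8); a power-of-two range sidesteps the
+// slight bias mentioned above.
+func exampleRandomUniformInt(s *Scope) tf.Output {
+	shape := Const(s, []int32{5})
+	minval := Const(s, int64(0))
+	maxval := Const(s, int64(8))
+	return RandomUniformInt(s, shape, minval, maxval, RandomUniformIntSeed(7))
+}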
-// ResourceApplyAdagradDAAttr is an optional argument to ResourceApplyAdagradDA.
-type ResourceApplyAdagradDAAttr func(optionalAttr)
+// SkipgramAttr is an optional argument to Skipgram.
+type SkipgramAttr func(optionalAttr)
-// ResourceApplyAdagradDAUseLocking sets the optional use_locking attribute to value.
+// SkipgramWindowSize sets the optional window_size attribute to value.
//
-// value: If True, updating of the var and accum tensors will be protected by
-// a lock; otherwise the behavior is undefined, but may exhibit less contention.
-// If not specified, defaults to false
-func ResourceApplyAdagradDAUseLocking(value bool) ResourceApplyAdagradDAAttr {
+// value: The number of words to predict to the left and right of the target.
+// If not specified, defaults to 5
+func SkipgramWindowSize(value int64) SkipgramAttr {
return func(m optionalAttr) {
- m["use_locking"] = value
+ m["window_size"] = value
}
}
-// Update '*var' according to the proximal adagrad scheme.
+// SkipgramMinCount sets the optional min_count attribute to value.
+//
+// value: The minimum number of word occurrences for it to be included in the
+// vocabulary.
+// If not specified, defaults to 5
+func SkipgramMinCount(value int64) SkipgramAttr {
+ return func(m optionalAttr) {
+ m["min_count"] = value
+ }
+}
+
+// SkipgramSubsample sets the optional subsample attribute to value.
+//
+// value: Threshold for word occurrence. Words that appear with higher
+// frequency will be randomly down-sampled. Set to 0 to disable.
+// If not specified, defaults to 0.001
+func SkipgramSubsample(value float32) SkipgramAttr {
+ return func(m optionalAttr) {
+ m["subsample"] = value
+ }
+}
+
+// Parses a text file and creates a batch of examples.
+//
+// DEPRECATED at GraphDef version 19: Moving word2vec into tensorflow_models/tutorials and deprecating its ops here as a result
//
// Arguments:
-// var_: Should be from a Variable().
-// gradient_accumulator: Should be from a Variable().
-// gradient_squared_accumulator: Should be from a Variable().
-// grad: The gradient.
-// lr: Scaling factor. Must be a scalar.
-// l1: L1 regularization. Must be a scalar.
-// l2: L2 regularization. Must be a scalar.
-// global_step: Training step number. Must be a scalar.
+// filename: The corpus's text file name.
+// batch_size: The size of produced batch.
//
-// Returns the created operation.
-func ResourceApplyAdagradDA(scope *Scope, var_ tf.Output, gradient_accumulator tf.Output, gradient_squared_accumulator tf.Output, grad tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, global_step tf.Output, optional ...ResourceApplyAdagradDAAttr) (o *tf.Operation) {
+// Returns A vector of words in the corpus. Frequencies of words, sorted in non-ascending order. Number of words per epoch in the data file. The current epoch number. The total number of words processed so far. A vector of word ids (examples). A vector of word ids (labels).
+func Skipgram(scope *Scope, filename string, batch_size int64, optional ...SkipgramAttr) (vocab_word tf.Output, vocab_freq tf.Output, words_per_epoch tf.Output, current_epoch tf.Output, total_words_processed tf.Output, examples tf.Output, labels tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"filename": filename, "batch_size": batch_size}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "ResourceApplyAdagradDA",
- Input: []tf.Input{
- var_, gradient_accumulator, gradient_squared_accumulator, grad, lr, l1, l2, global_step,
- },
+ Type: "Skipgram",
+
Attrs: attrs,
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5), op.Output(6)
}
-// FractionalMaxPoolGradAttr is an optional argument to FractionalMaxPoolGrad.
-type FractionalMaxPoolGradAttr func(optionalAttr)
+// StringToNumberAttr is an optional argument to StringToNumber.
+type StringToNumberAttr func(optionalAttr)
-// FractionalMaxPoolGradOverlapping sets the optional overlapping attribute to value.
-//
-// value: When set to True, it means when pooling, the values at the boundary
-// of adjacent pooling cells are used by both cells. For example:
-//
-// `index 0 1 2 3 4`
-//
-// `value 20 5 16 3 7`
+// StringToNumberOutType sets the optional out_type attribute to value.
//
-// If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.
-// The result would be [20, 16] for fractional max pooling.
-// If not specified, defaults to false
-func FractionalMaxPoolGradOverlapping(value bool) FractionalMaxPoolGradAttr {
+// value: The numeric type to interpret each string in `string_tensor` as.
+// If not specified, defaults to DT_FLOAT
+func StringToNumberOutType(value tf.DataType) StringToNumberAttr {
return func(m optionalAttr) {
- m["overlapping"] = value
+ m["out_type"] = value
}
}
-// Computes gradient of the FractionalMaxPool function.
+// Converts each string in the input Tensor to the specified numeric type.
//
-// Arguments:
-// orig_input: Original input for `fractional_max_pool`
-// orig_output: Original output for `fractional_max_pool`
-// out_backprop: 4-D with shape `[batch, height, width, channels]`. Gradients
-// w.r.t. the output of `fractional_max_pool`.
-// row_pooling_sequence: row pooling sequence, form pooling region with
-// col_pooling_sequence.
-// col_pooling_sequence: column pooling sequence, form pooling region with
-// row_pooling sequence.
+// (Note that int32 overflow results in an error while float overflow
+// results in a rounded value.)
//
-// Returns 4-D. Gradients w.r.t. the input of `fractional_max_pool`.
-func FractionalMaxPoolGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, out_backprop tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output, optional ...FractionalMaxPoolGradAttr) (output tf.Output) {
+// Returns A Tensor of the same shape as the input `string_tensor`.
+func StringToNumber(scope *Scope, string_tensor tf.Output, optional ...StringToNumberAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
@@ -14765,9 +14931,9 @@ func FractionalMaxPoolGrad(scope *Scope, orig_input tf.Output, orig_output tf.Ou
a(attrs)
}
opspec := tf.OpSpec{
- Type: "FractionalMaxPoolGrad",
+ Type: "StringToNumber",
Input: []tf.Input{
- orig_input, orig_output, out_backprop, row_pooling_sequence, col_pooling_sequence,
+ string_tensor,
},
Attrs: attrs,
}
@@ -14775,53 +14941,44 @@ func FractionalMaxPoolGrad(scope *Scope, orig_input tf.Output, orig_output tf.Ou
return op.Output(0)
}
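A hedged sketch of the conversion, again assuming the standard binding imports; `StringToNumberOutType` switches the parse target away from the default DT_FLOAT:

```go
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	strs := op.Const(s, []string{"42", "-7", "1000000"})
	// Parse as int32; out-of-range values fail at run time, whereas
	// the default float parse would round instead.
	nums := op.StringToNumber(s, strs, op.StringToNumberOutType(tf.Int32))

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()
	res, err := sess.Run(nil, []tf.Output{nums}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(res[0].Value()) // [42 -7 1000000]
}
```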
-// Does nothing. Serves as a control trigger for scheduling.
-//
-// Only useful as a placeholder for control edges.
-//
-// Returns the created operation.
-func ControlTrigger(scope *Scope) (o *tf.Operation) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "ControlTrigger",
- }
- return scope.AddOperation(opspec)
-}
-
-// ResourceApplyAddSignAttr is an optional argument to ResourceApplyAddSign.
-type ResourceApplyAddSignAttr func(optionalAttr)
+// ResourceApplyFtrlV2Attr is an optional argument to ResourceApplyFtrlV2.
+type ResourceApplyFtrlV2Attr func(optionalAttr)
-// ResourceApplyAddSignUseLocking sets the optional use_locking attribute to value.
+// ResourceApplyFtrlV2UseLocking sets the optional use_locking attribute to value.
//
-// value: If `True`, updating of the var and m tensors is
-// protected by a lock; otherwise the behavior is undefined, but may exhibit less
+// value: If `True`, updating of the var and accum tensors will be protected
+// by a lock; otherwise the behavior is undefined, but may exhibit less
// contention.
// If not specified, defaults to false
-func ResourceApplyAddSignUseLocking(value bool) ResourceApplyAddSignAttr {
+func ResourceApplyFtrlV2UseLocking(value bool) ResourceApplyFtrlV2Attr {
return func(m optionalAttr) {
m["use_locking"] = value
}
}
-// Update '*var' according to the AddSign update.
+// Update '*var' according to the Ftrl-proximal scheme.
//
-// m_t <- beta1 * m_{t-1} + (1 - beta1) * g
-// update <- (alpha + sign_decay * sign(g) *sign(m)) * g
-// variable <- variable - lr_t * update
+// grad_with_shrinkage = grad + 2 * l2_shrinkage * var
+// accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
+// linear += grad_with_shrinkage +
+// (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
+// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
+// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
+// accum = accum_new
//
// Arguments:
// var_: Should be from a Variable().
-// m: Should be from a Variable().
-// lr: Scaling factor. Must be a scalar.
-// alpha: Must be a scalar.
-// sign_decay: Must be a scalar.
-// beta: Must be a scalar.
+// accum: Should be from a Variable().
+// linear: Should be from a Variable().
// grad: The gradient.
+// lr: Scaling factor. Must be a scalar.
+// l1: L1 regularization. Must be a scalar.
+// l2: L2 shrinkage regularization. Must be a scalar.
+//
+// lr_power: Scaling factor. Must be a scalar.
//
// Returns the created operation.
-func ResourceApplyAddSign(scope *Scope, var_ tf.Output, m tf.Output, lr tf.Output, alpha tf.Output, sign_decay tf.Output, beta tf.Output, grad tf.Output, optional ...ResourceApplyAddSignAttr) (o *tf.Operation) {
+func ResourceApplyFtrlV2(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, l2_shrinkage tf.Output, lr_power tf.Output, optional ...ResourceApplyFtrlV2Attr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
@@ -14830,100 +14987,64 @@ func ResourceApplyAddSign(scope *Scope, var_ tf.Output, m tf.Output, lr tf.Outpu
a(attrs)
}
opspec := tf.OpSpec{
- Type: "ResourceApplyAddSign",
+ Type: "ResourceApplyFtrlV2",
Input: []tf.Input{
- var_, m, lr, alpha, sign_decay, beta, grad,
+ var_, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power,
},
Attrs: attrs,
}
return scope.AddOperation(opspec)
}
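To make the update equations above concrete, here is a plain-Go transcription of a single scalar step; this illustrates the arithmetic only (all state and hyperparameter values are made up), not how the resource op itself is invoked:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// Made-up scalar state and hyperparameters.
	v, accum, linear := 0.5, 1.0, 0.0
	grad := 0.2
	lr, l1, l2, l2Shrinkage, lrPower := 0.1, 0.01, 0.01, 0.001, -0.5

	gws := grad + 2*l2Shrinkage*v // grad_with_shrinkage
	accumNew := accum + gws*gws
	linear += gws + (math.Pow(accumNew, -lrPower)-math.Pow(accum, -lrPower))/lr*v
	quadratic := 1.0/(math.Pow(accumNew, lrPower)*lr) + 2*l2
	if math.Abs(linear) > l1 {
		sign := 1.0
		if linear < 0 {
			sign = -1.0
		}
		v = (sign*l1 - linear) / quadratic
	} else {
		v = 0.0
	}
	accum = accumNew
	fmt.Println(v, accum, linear)
}
```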
-// Reorders a SparseTensor into the canonical, row-major ordering.
-//
-// Note that by convention, all sparse ops preserve the canonical ordering along
-// increasing dimension number. The only time ordering can be violated is during
-// manual manipulation of the indices and values vectors to add entries.
-//
-// Reordering does not affect the shape of the SparseTensor.
-//
-// If the tensor has rank `R` and `N` non-empty values, `input_indices` has
-// shape `[N, R]`, input_values has length `N`, and input_shape has length `R`.
-//
-// Arguments:
-// input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a
-// SparseTensor, possibly not in canonical ordering.
-// input_values: 1-D. `N` non-empty values corresponding to `input_indices`.
-// input_shape: 1-D. Shape of the input SparseTensor.
+// TruncatedNormalAttr is an optional argument to TruncatedNormal.
+type TruncatedNormalAttr func(optionalAttr)
+
+// TruncatedNormalSeed sets the optional seed attribute to value.
//
-// Returns 2-D. `N x R` matrix with the same indices as input_indices, but
-// in canonical row-major ordering.1-D. `N` non-empty values corresponding to `output_indices`.
-func SparseReorder(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output) (output_indices tf.Output, output_values tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "SparseReorder",
- Input: []tf.Input{
- input_indices, input_values, input_shape,
- },
+// value: If either `seed` or `seed2` are set to be non-zero, the random number
+// generator is seeded by the given seed. Otherwise, it is seeded by a
+// random seed.
+// If not specified, defaults to 0
+func TruncatedNormalSeed(value int64) TruncatedNormalAttr {
+ return func(m optionalAttr) {
+ m["seed"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1)
}
-// PackAttr is an optional argument to Pack.
-type PackAttr func(optionalAttr)
-
-// PackAxis sets the optional axis attribute to value.
+// TruncatedNormalSeed2 sets the optional seed2 attribute to value.
//
-// value: Dimension along which to pack. Negative values wrap around, so the
-// valid range is `[-(R+1), R+1)`.
+// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
-func PackAxis(value int64) PackAttr {
+func TruncatedNormalSeed2(value int64) TruncatedNormalAttr {
return func(m optionalAttr) {
- m["axis"] = value
+ m["seed2"] = value
}
}
-// Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor.
-//
-// Packs the `N` tensors in `values` into a tensor with rank one higher than each
-// tensor in `values`, by packing them along the `axis` dimension.
-// Given a list of tensors of shape `(A, B, C)`;
-//
-// if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
-// if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
-// Etc.
-//
-// For example:
-//
-// ```
-// # 'x' is [1, 4]
-// # 'y' is [2, 5]
-// # 'z' is [3, 6]
-// pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim.
-// pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
-// ```
+// Outputs random values from a truncated normal distribution.
//
-// This is the opposite of `unpack`.
+// The generated values follow a normal distribution with mean 0 and standard
+// deviation 1, except that values whose magnitude is more than 2 standard
+// deviations from the mean are dropped and re-picked.
//
// Arguments:
-// values: Must be of same shape and type.
+// shape: The shape of the output tensor.
+// dtype: The type of the output.
//
-// Returns The packed tensor.
-func Pack(scope *Scope, values []tf.Output, optional ...PackAttr) (output tf.Output) {
+// Returns A tensor of the specified shape filled with random truncated normal
+// values.
+func TruncatedNormal(scope *Scope, shape tf.Output, dtype tf.DataType, optional ...TruncatedNormalAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"dtype": dtype}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "Pack",
+ Type: "TruncatedNormal",
Input: []tf.Input{
- tf.OutputList(values),
+ shape,
},
Attrs: attrs,
}
@@ -14931,41 +15052,39 @@ func Pack(scope *Scope, values []tf.Output, optional ...PackAttr) (output tf.Out
return op.Output(0)
}
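A minimal sketch of drawing from this distribution with both seeds pinned for repeatability; the import paths are the standard binding packages and the printed values are illustrative:

```go
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	shape := op.Const(s, []int32{4})
	// Every value has magnitude <= 2 by construction; fixed seeds make
	// the draw repeatable across runs.
	draw := op.TruncatedNormal(s, shape, tf.Float,
		op.TruncatedNormalSeed(1), op.TruncatedNormalSeed2(2))

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()
	res, err := sess.Run(nil, []tf.Output{draw}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(res[0].Value()) // e.g. [0.12 -1.3 0.8 -0.4]
}
```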
-// Deprecated. Use TensorArraySplitV3
-func TensorArraySplitV2(scope *Scope, handle tf.Output, value tf.Output, lengths tf.Output, flow_in tf.Output) (flow_out tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "TensorArraySplitV2",
- Input: []tf.Input{
- handle, value, lengths, flow_in,
- },
+// FakeQuantWithMinMaxVarsPerChannelAttr is an optional argument to FakeQuantWithMinMaxVarsPerChannel.
+type FakeQuantWithMinMaxVarsPerChannelAttr func(optionalAttr)
+
+// FakeQuantWithMinMaxVarsPerChannelNumBits sets the optional num_bits attribute to value.
+// If not specified, defaults to 8
+func FakeQuantWithMinMaxVarsPerChannelNumBits(value int64) FakeQuantWithMinMaxVarsPerChannelAttr {
+ return func(m optionalAttr) {
+ m["num_bits"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// QuantizedReluAttr is an optional argument to QuantizedRelu.
-type QuantizedReluAttr func(optionalAttr)
-
-// QuantizedReluOutType sets the optional out_type attribute to value.
-// If not specified, defaults to DT_QUINT8
-func QuantizedReluOutType(value tf.DataType) QuantizedReluAttr {
+// FakeQuantWithMinMaxVarsPerChannelNarrowRange sets the optional narrow_range attribute to value.
+// If not specified, defaults to false
+func FakeQuantWithMinMaxVarsPerChannelNarrowRange(value bool) FakeQuantWithMinMaxVarsPerChannelAttr {
return func(m optionalAttr) {
- m["out_type"] = value
+ m["narrow_range"] = value
}
}
-// Computes Quantized Rectified Linear: `max(features, 0)`
+// Fake-quantize the 'inputs' tensor of type float and one of the shapes: `[d]`,
//
-// Arguments:
+// `[b, d]`, `[b, h, w, d]` via per-channel floats `min` and `max` of shape `[d]`
+// to 'outputs' tensor of same shape as `inputs`.
//
-// min_features: The float value that the lowest quantized value represents.
-// max_features: The float value that the highest quantized value represents.
+// `[min; max]` define the clamping range for the `inputs` data.
+// `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`
+// when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and
+// then de-quantized and output as floats in `[min; max]` interval.
+// `num_bits` is the bitwidth of the quantization; between 2 and 8, inclusive.
//
-// Returns Has the same output shape as "features".The float value that the lowest quantized value represents.The float value that the highest quantized value represents.
-func QuantizedRelu(scope *Scope, features tf.Output, min_features tf.Output, max_features tf.Output, optional ...QuantizedReluAttr) (activations tf.Output, min_activations tf.Output, max_activations tf.Output) {
+// This operation has a gradient and thus allows for training `min` and `max`
+// values.
+func FakeQuantWithMinMaxVarsPerChannel(scope *Scope, inputs tf.Output, min tf.Output, max tf.Output, optional ...FakeQuantWithMinMaxVarsPerChannelAttr) (outputs tf.Output) {
if scope.Err() != nil {
return
}
@@ -14974,247 +15093,170 @@ func QuantizedRelu(scope *Scope, features tf.Output, min_features tf.Output, max
a(attrs)
}
opspec := tf.OpSpec{
- Type: "QuantizedRelu",
+ Type: "FakeQuantWithMinMaxVarsPerChannel",
Input: []tf.Input{
- features, min_features, max_features,
+ inputs, min, max,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
+ return op.Output(0)
}
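The quantize/de-quantize round trip described above can be spelled out for a single channel in plain Go; this is a sketch of the math only (it ignores any nudging of the clamping range the kernel may perform), with made-up values:

```go
package main

import (
	"fmt"
	"math"
)

// fakeQuant clamps x into [min, max], quantizes it onto 2^numBits levels,
// and de-quantizes back to a float, mirroring the per-channel behavior.
func fakeQuant(x, min, max float64, numBits uint) float64 {
	levels := math.Pow(2, float64(numBits)) - 1 // narrow_range = false
	clamped := math.Min(math.Max(x, min), max)
	scale := (max - min) / levels
	q := math.Floor((clamped-min)/scale + 0.5) // integer level in [0, levels]
	return min + q*scale
}

func main() {
	for _, x := range []float64{-1.5, 0.0, 0.3, 2.0} {
		fmt.Printf("%v -> %v\n", x, fakeQuant(x, -1.0, 1.0, 8))
	}
}
```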
-// InitializeTableFromTextFileV2Attr is an optional argument to InitializeTableFromTextFileV2.
-type InitializeTableFromTextFileV2Attr func(optionalAttr)
+// RandomShuffleAttr is an optional argument to RandomShuffle.
+type RandomShuffleAttr func(optionalAttr)
-// InitializeTableFromTextFileV2VocabSize sets the optional vocab_size attribute to value.
-//
-// value: Number of elements of the file, use -1 if unknown.
-// If not specified, defaults to -1
+// RandomShuffleSeed sets the optional seed attribute to value.
//
-// REQUIRES: value >= -1
-func InitializeTableFromTextFileV2VocabSize(value int64) InitializeTableFromTextFileV2Attr {
+// value: If either `seed` or `seed2` are set to be non-zero, the random number
+// generator is seeded by the given seed. Otherwise, it is seeded by a
+// random seed.
+// If not specified, defaults to 0
+func RandomShuffleSeed(value int64) RandomShuffleAttr {
return func(m optionalAttr) {
- m["vocab_size"] = value
+ m["seed"] = value
}
}
-// InitializeTableFromTextFileV2Delimiter sets the optional delimiter attribute to value.
+// RandomShuffleSeed2 sets the optional seed2 attribute to value.
//
-// value: Delimiter to separate fields in a line.
-// If not specified, defaults to "\t"
-func InitializeTableFromTextFileV2Delimiter(value string) InitializeTableFromTextFileV2Attr {
+// value: A second seed to avoid seed collision.
+// If not specified, defaults to 0
+func RandomShuffleSeed2(value int64) RandomShuffleAttr {
return func(m optionalAttr) {
- m["delimiter"] = value
+ m["seed2"] = value
}
}
-// Initializes a table from a text file.
+// Randomly shuffles a tensor along its first dimension.
//
-// It inserts one key-value pair into the table for each line of the file.
-// The key and value is extracted from the whole line content, elements from the
-// split line based on `delimiter` or the line number (starting from zero).
-// Where to extract the key and value from a line is specified by `key_index` and
-// `value_index`.
+// The tensor is shuffled along dimension 0, such that each `value[j]` is mapped
+// to one and only one `output[i]`. For example, a mapping that might occur for a
+// 3x2 tensor is:
//
-// - A value of -1 means use the line number(starting from zero), expects `int64`.
-// - A value of -2 means use the whole line content, expects `string`.
-// - A value >= 0 means use the index (starting at zero) of the split line based
-// on `delimiter`.
+// ```
+// [[1, 2], [[5, 6],
+// [3, 4], ==> [1, 2],
+// [5, 6]] [3, 4]]
+// ```
//
// Arguments:
-// table_handle: Handle to a table which will be initialized.
-// filename: Filename of a vocabulary text file.
-// key_index: Column index in a line to get the table `key` values from.
-// value_index: Column index that represents information of a line to get the table
-// `value` values from.
+// value: The tensor to be shuffled.
//
-// Returns the created operation.
-func InitializeTableFromTextFileV2(scope *Scope, table_handle tf.Output, filename tf.Output, key_index int64, value_index int64, optional ...InitializeTableFromTextFileV2Attr) (o *tf.Operation) {
+// Returns A tensor of same shape and type as `value`, shuffled along its first
+// dimension.
+func RandomShuffle(scope *Scope, value tf.Output, optional ...RandomShuffleAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"key_index": key_index, "value_index": value_index}
+ attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "InitializeTableFromTextFileV2",
+ Type: "RandomShuffle",
Input: []tf.Input{
- table_handle, filename,
+ value,
},
Attrs: attrs,
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// ResourceSparseApplyProximalGradientDescentAttr is an optional argument to ResourceSparseApplyProximalGradientDescent.
-type ResourceSparseApplyProximalGradientDescentAttr func(optionalAttr)
+// OrderedMapIncompleteSizeAttr is an optional argument to OrderedMapIncompleteSize.
+type OrderedMapIncompleteSizeAttr func(optionalAttr)
-// ResourceSparseApplyProximalGradientDescentUseLocking sets the optional use_locking attribute to value.
+// OrderedMapIncompleteSizeCapacity sets the optional capacity attribute to value.
+// If not specified, defaults to 0
//
-// value: If True, the subtraction will be protected by a lock;
-// otherwise the behavior is undefined, but may exhibit less contention.
-// If not specified, defaults to false
-func ResourceSparseApplyProximalGradientDescentUseLocking(value bool) ResourceSparseApplyProximalGradientDescentAttr {
+// REQUIRES: value >= 0
+func OrderedMapIncompleteSizeCapacity(value int64) OrderedMapIncompleteSizeAttr {
return func(m optionalAttr) {
- m["use_locking"] = value
+ m["capacity"] = value
}
}
-// Sparse update '*var' as FOBOS algorithm with fixed learning rate.
-//
-// That is for rows we have grad for, we update var as follows:
-// prox_v = var - alpha * grad
-// var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
-//
-// Arguments:
-// var_: Should be from a Variable().
-// alpha: Scaling factor. Must be a scalar.
-// l1: L1 regularization. Must be a scalar.
-// l2: L2 regularization. Must be a scalar.
-// grad: The gradient.
-// indices: A vector of indices into the first dimension of var and accum.
+// OrderedMapIncompleteSizeMemoryLimit sets the optional memory_limit attribute to value.
+// If not specified, defaults to 0
//
-// Returns the created operation.
-func ResourceSparseApplyProximalGradientDescent(scope *Scope, var_ tf.Output, alpha tf.Output, l1 tf.Output, l2 tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyProximalGradientDescentAttr) (o *tf.Operation) {
- if scope.Err() != nil {
- return
- }
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
- opspec := tf.OpSpec{
- Type: "ResourceSparseApplyProximalGradientDescent",
- Input: []tf.Input{
- var_, alpha, l1, l2, grad, indices,
- },
- Attrs: attrs,
+// REQUIRES: value >= 0
+func OrderedMapIncompleteSizeMemoryLimit(value int64) OrderedMapIncompleteSizeAttr {
+ return func(m optionalAttr) {
+ m["memory_limit"] = value
}
- return scope.AddOperation(opspec)
}
-// Records the bytes size of each element of `input_dataset` in a StatsAggregator.
-func BytesProducedStatsDataset(scope *Scope, input_dataset tf.Output, tag tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
- if scope.Err() != nil {
- return
- }
- attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
- opspec := tf.OpSpec{
- Type: "BytesProducedStatsDataset",
- Input: []tf.Input{
- input_dataset, tag,
- },
- Attrs: attrs,
+// OrderedMapIncompleteSizeContainer sets the optional container attribute to value.
+// If not specified, defaults to ""
+func OrderedMapIncompleteSizeContainer(value string) OrderedMapIncompleteSizeAttr {
+ return func(m optionalAttr) {
+ m["container"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// QrAttr is an optional argument to Qr.
-type QrAttr func(optionalAttr)
-
-// QrFullMatrices sets the optional full_matrices attribute to value.
-//
-// value: If true, compute full-sized `q` and `r`. If false
-// (the default), compute only the leading `P` columns of `q`.
-// If not specified, defaults to false
-func QrFullMatrices(value bool) QrAttr {
+// OrderedMapIncompleteSizeSharedName sets the optional shared_name attribute to value.
+// If not specified, defaults to ""
+func OrderedMapIncompleteSizeSharedName(value string) OrderedMapIncompleteSizeAttr {
return func(m optionalAttr) {
- m["full_matrices"] = value
+ m["shared_name"] = value
}
}
-// Computes the QR decompositions of one or more matrices.
-//
-// Computes the QR decomposition of each inner matrix in `tensor` such that
-// `tensor[..., :, :] = q[..., :, :] * r[..., :,:])`
-//
-// ```python
-// # a is a tensor.
-// # q is a tensor of orthonormal matrices.
-// # r is a tensor of upper triangular matrices.
-// q, r = qr(a)
-// q_full, r_full = qr(a, full_matrices=True)
-// ```
-//
-// Arguments:
-// input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
-// form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.
-//
-// Returns Orthonormal basis for range of `a`. If `full_matrices` is `False` then
-// shape is `[..., M, P]`; if `full_matrices` is `True` then shape is
-// `[..., M, M]`.Triangular factor. If `full_matrices` is `False` then shape is
-// `[..., P, N]`. If `full_matrices` is `True` then shape is `[..., M, N]`.
-func Qr(scope *Scope, input tf.Output, optional ...QrAttr) (q tf.Output, r tf.Output) {
+// Op returns the number of incomplete elements in the underlying container.
+func OrderedMapIncompleteSize(scope *Scope, dtypes []tf.DataType, optional ...OrderedMapIncompleteSizeAttr) (size tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"dtypes": dtypes}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "Qr",
- Input: []tf.Input{
- input,
- },
+ Type: "OrderedMapIncompleteSize",
+
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1)
+ return op.Output(0)
}
-// AudioSummaryAttr is an optional argument to AudioSummary.
-type AudioSummaryAttr func(optionalAttr)
+// DecodeRawAttr is an optional argument to DecodeRaw.
+type DecodeRawAttr func(optionalAttr)
-// AudioSummaryMaxOutputs sets the optional max_outputs attribute to value.
-//
-// value: Max number of batch elements to generate audio for.
-// If not specified, defaults to 3
+// DecodeRawLittleEndian sets the optional little_endian attribute to value.
//
-// REQUIRES: value >= 1
-func AudioSummaryMaxOutputs(value int64) AudioSummaryAttr {
+// value: Whether the input `bytes` are in little-endian order.
+// Ignored for `out_type` values that are stored in a single byte like
+// `uint8`.
+// If not specified, defaults to true
+func DecodeRawLittleEndian(value bool) DecodeRawAttr {
return func(m optionalAttr) {
- m["max_outputs"] = value
+ m["little_endian"] = value
}
}
-// Outputs a `Summary` protocol buffer with audio.
-//
-// DEPRECATED at GraphDef version 15: Use AudioSummaryV2.
-//
-// The summary has up to `max_outputs` summary values containing audio. The
-// audio is built from `tensor` which must be 3-D with shape `[batch_size,
-// frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
-// assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.
-//
-// The `tag` argument is a scalar `Tensor` of type `string`. It is used to
-// build the `tag` of the summary values:
-//
-// * If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
-// * If `max_outputs` is greater than 1, the summary value tags are
-// generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
+// Reinterpret the bytes of a string as a vector of numbers.
//
// Arguments:
-// tag: Scalar. Used to build the `tag` attribute of the summary values.
-// tensor: 2-D of shape `[batch_size, frames]`.
-// sample_rate: The sample rate of the signal in hertz.
+// bytes: All the elements must have the same length.
//
-// Returns Scalar. Serialized `Summary` protocol buffer.
-func AudioSummary(scope *Scope, tag tf.Output, tensor tf.Output, sample_rate float32, optional ...AudioSummaryAttr) (summary tf.Output) {
+//
+// Returns A Tensor with one more dimension than the input `bytes`. The
+// added dimension will have size equal to the length of the elements
+// of `bytes` divided by the number of bytes to represent `out_type`.
+func DecodeRaw(scope *Scope, bytes tf.Output, out_type tf.DataType, optional ...DecodeRawAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"sample_rate": sample_rate}
+ attrs := map[string]interface{}{"out_type": out_type}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "AudioSummary",
+ Type: "DecodeRaw",
Input: []tf.Input{
- tag, tensor,
+ bytes,
},
Attrs: attrs,
}
@@ -15222,124 +15264,99 @@ func AudioSummary(scope *Scope, tag tf.Output, tensor tf.Output, sample_rate flo
return op.Output(0)
}
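A sketch assuming the standard binding imports; each 4-byte group below is one little-endian int32, so every 8-byte string decodes to two values:

```go
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// Two strings of 8 bytes each; each decodes to two little-endian int32s.
	raw := op.Const(s, []string{
		"\x01\x00\x00\x00\x02\x00\x00\x00",
		"\x03\x00\x00\x00\x04\x00\x00\x00",
	})
	ints := op.DecodeRaw(s, raw, tf.Int32)

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()
	res, err := sess.Run(nil, []tf.Output{ints}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(res[0].Value()) // [[1 2] [3 4]]
}
```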
-// Reverses specific dimensions of a tensor.
+// Copy a tensor setting everything outside a central band in each innermost matrix
//
-// NOTE `tf.reverse` has now changed behavior in preparation for 1.0.
-// `tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0.
+// to zero.
//
-// Given a `tensor`, and a `int32` tensor `axis` representing the set of
-// dimensions of `tensor` to reverse. This operation reverses each dimension
-// `i` for which there exists `j` s.t. `axis[j] == i`.
+// The `band` part is computed as follows:
+// Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
+// tensor with the same shape where
//
-// `tensor` can have up to 8 dimensions. The number of dimensions specified
-// in `axis` may be 0 or more entries. If an index is specified more than
-// once, a InvalidArgument error is raised.
+// `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.
+//
+// The indicator function
+//
+// `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower) &&
+// (num_upper < 0 || (n-m) <= num_upper)`.
//
// For example:
//
// ```
-// # tensor 't' is [[[[ 0, 1, 2, 3],
-// # [ 4, 5, 6, 7],
-// # [ 8, 9, 10, 11]],
-// # [[12, 13, 14, 15],
-// # [16, 17, 18, 19],
-// # [20, 21, 22, 23]]]]
-// # tensor 't' shape is [1, 2, 3, 4]
+// # if 'input' is [[ 0, 1, 2, 3]
+// [-1, 0, 1, 2]
+// [-2, -1, 0, 1]
+// [-3, -2, -1, 0]],
//
-// # 'dims' is [3] or 'dims' is [-1]
-// reverse(t, dims) ==> [[[[ 3, 2, 1, 0],
-// [ 7, 6, 5, 4],
-// [ 11, 10, 9, 8]],
-// [[15, 14, 13, 12],
-// [19, 18, 17, 16],
-// [23, 22, 21, 20]]]]
+// tf.matrix_band_part(input, 1, -1) ==> [[ 0, 1, 2, 3]
+// [-1, 0, 1, 2]
+// [ 0, -1, 0, 1]
+// [ 0, 0, -1, 0]],
//
-// # 'dims' is '[1]' (or 'dims' is '[-3]')
-// reverse(t, dims) ==> [[[[12, 13, 14, 15],
-// [16, 17, 18, 19],
-// [20, 21, 22, 23]
-// [[ 0, 1, 2, 3],
-// [ 4, 5, 6, 7],
-// [ 8, 9, 10, 11]]]]
+// tf.matrix_band_part(input, 2, 1) ==> [[ 0, 1, 0, 0]
+// [-1, 0, 1, 0]
+// [-2, -1, 0, 1]
+// [ 0, -2, -1, 0]]
+// ```
//
-// # 'dims' is '[2]' (or 'dims' is '[-2]')
-// reverse(t, dims) ==> [[[[8, 9, 10, 11],
-// [4, 5, 6, 7],
-// [0, 1, 2, 3]]
-// [[20, 21, 22, 23],
-// [16, 17, 18, 19],
-// [12, 13, 14, 15]]]]
+// Useful special cases:
+//
+// ```
+// tf.matrix_band_part(input, 0, -1) ==> Upper triangular part.
+// tf.matrix_band_part(input, -1, 0) ==> Lower triangular part.
+// tf.matrix_band_part(input, 0, 0) ==> Diagonal.
// ```
//
// Arguments:
-// tensor: Up to 8-D.
-// axis: 1-D. The indices of the dimensions to reverse. Must be in the range
-// `[-rank(tensor), rank(tensor))`.
+// input: Rank `k` tensor.
+// num_lower: 0-D tensor. Number of subdiagonals to keep. If negative, keep entire
+// lower triangle.
+// num_upper: 0-D tensor. Number of superdiagonals to keep. If negative, keep
+// entire upper triangle.
//
-// Returns The same shape as `tensor`.
-func ReverseV2(scope *Scope, tensor tf.Output, axis tf.Output) (output tf.Output) {
+// Returns Rank `k` tensor of the same shape as input. The extracted banded tensor.
+func MatrixBandPart(scope *Scope, input tf.Output, num_lower tf.Output, num_upper tf.Output) (band tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "ReverseV2",
+ Type: "MatrixBandPart",
Input: []tf.Input{
- tensor, axis,
+ input, num_lower, num_upper,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
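As a check on the indicator function above, the following sketch (standard binding imports assumed) reproduces the doc's `tf.matrix_band_part(input, 1, -1)` example:

```go
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	input := op.Const(s, [][]int32{
		{0, 1, 2, 3},
		{-1, 0, 1, 2},
		{-2, -1, 0, 1},
		{-3, -2, -1, 0},
	})
	// Keep one subdiagonal and the whole upper triangle (num_upper = -1).
	band := op.MatrixBandPart(s, input, op.Const(s, int64(1)), op.Const(s, int64(-1)))

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()
	res, err := sess.Run(nil, []tf.Output{band}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(res[0].Value())
	// [[0 1 2 3] [-1 0 1 2] [0 -1 0 1] [0 0 -1 0]]
}
```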
-// ResourceApplyCenteredRMSPropAttr is an optional argument to ResourceApplyCenteredRMSProp.
-type ResourceApplyCenteredRMSPropAttr func(optionalAttr)
+// DecodeCompressedAttr is an optional argument to DecodeCompressed.
+type DecodeCompressedAttr func(optionalAttr)
-// ResourceApplyCenteredRMSPropUseLocking sets the optional use_locking attribute to value.
+// DecodeCompressedCompressionType sets the optional compression_type attribute to value.
//
-// value: If `True`, updating of the var, mg, ms, and mom tensors is
-// protected by a lock; otherwise the behavior is undefined, but may exhibit less
-// contention.
-// If not specified, defaults to false
-func ResourceApplyCenteredRMSPropUseLocking(value bool) ResourceApplyCenteredRMSPropAttr {
+// value: A scalar containing either (i) the empty string (no
+// compression), (ii) "ZLIB", or (iii) "GZIP".
+// If not specified, defaults to ""
+func DecodeCompressedCompressionType(value string) DecodeCompressedAttr {
return func(m optionalAttr) {
- m["use_locking"] = value
+ m["compression_type"] = value
}
}
-// Update '*var' according to the centered RMSProp algorithm.
-//
-// The centered RMSProp algorithm uses an estimate of the centered second moment
-// (i.e., the variance) for normalization, as opposed to regular RMSProp, which
-// uses the (uncentered) second moment. This often helps with training, but is
-// slightly more expensive in terms of computation and memory.
-//
-// Note that in dense implementation of this algorithm, mg, ms, and mom will
-// update even if the grad is zero, but in this sparse implementation, mg, ms,
-// and mom will not update in iterations during which the grad is zero.
-//
-// mean_square = decay * mean_square + (1-decay) * gradient ** 2
-// mean_grad = decay * mean_grad + (1-decay) * gradient
+// Decompress strings.
//
-// Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
+// This op decompresses each element of the `bytes` input `Tensor`, which
+// is assumed to be compressed using the given `compression_type`.
//
-// mg <- rho * mg_{t-1} + (1-rho) * grad
-// ms <- rho * ms_{t-1} + (1-rho) * grad * grad
-// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)
-// var <- var - mom
+// The `output` is a string `Tensor` of the same shape as `bytes`,
+// each element containing the decompressed data from the corresponding
+// element in `bytes`.
//
// Arguments:
-// var_: Should be from a Variable().
-// mg: Should be from a Variable().
-// ms: Should be from a Variable().
-// mom: Should be from a Variable().
-// lr: Scaling factor. Must be a scalar.
-// rho: Decay rate. Must be a scalar.
-//
-// epsilon: Ridge term. Must be a scalar.
-// grad: The gradient.
+// bytes: A Tensor of compressed strings.
//
-// Returns the created operation.
-func ResourceApplyCenteredRMSProp(scope *Scope, var_ tf.Output, mg tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyCenteredRMSPropAttr) (o *tf.Operation) {
+// Returns A Tensor with the same shape as input `bytes`, uncompressed
+// from bytes.
+func DecodeCompressed(scope *Scope, bytes tf.Output, optional ...DecodeCompressedAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
@@ -15348,239 +15365,329 @@ func ResourceApplyCenteredRMSProp(scope *Scope, var_ tf.Output, mg tf.Output, ms
a(attrs)
}
opspec := tf.OpSpec{
- Type: "ResourceApplyCenteredRMSProp",
+ Type: "DecodeCompressed",
Input: []tf.Input{
- var_, mg, ms, mom, lr, rho, momentum, epsilon, grad,
+ bytes,
},
Attrs: attrs,
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// Inverse 3D fast Fourier transform.
+// WholeFileReaderV2Attr is an optional argument to WholeFileReaderV2.
+type WholeFileReaderV2Attr func(optionalAttr)
+
+// WholeFileReaderV2Container sets the optional container attribute to value.
//
-// Computes the inverse 3-dimensional discrete Fourier transform over the
-// inner-most 3 dimensions of `input`.
+// value: If non-empty, this reader is placed in the given container.
+// Otherwise, a default container is used.
+// If not specified, defaults to ""
+func WholeFileReaderV2Container(value string) WholeFileReaderV2Attr {
+ return func(m optionalAttr) {
+ m["container"] = value
+ }
+}
+
+// WholeFileReaderV2SharedName sets the optional shared_name attribute to value.
//
-// Arguments:
-// input: A complex64 tensor.
+// value: If non-empty, this reader is named in the given bucket
+// with this shared_name. Otherwise, the node name is used instead.
+// If not specified, defaults to ""
+func WholeFileReaderV2SharedName(value string) WholeFileReaderV2Attr {
+ return func(m optionalAttr) {
+ m["shared_name"] = value
+ }
+}
+
+// A Reader that outputs the entire contents of a file as a value.
//
-// Returns A complex64 tensor of the same shape as `input`. The inner-most 3
-// dimensions of `input` are replaced with their inverse 3D Fourier transform.
+// To use, enqueue filenames in a Queue. The output of ReaderRead will
+// be a filename (key) and the contents of that file (value).
//
-// @compatibility(numpy)
-// Equivalent to np.fft.ifftn with 3 dimensions.
-// @end_compatibility
-func IFFT3D(scope *Scope, input tf.Output) (output tf.Output) {
+// Returns The handle to reference the Reader.
+func WholeFileReaderV2(scope *Scope, optional ...WholeFileReaderV2Attr) (reader_handle tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "IFFT3D",
- Input: []tf.Input{
- input,
- },
+ Type: "WholeFileReaderV2",
+
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Increments variable pointed to by 'resource' until it reaches 'limit'.
+// Transforms a tf.Example proto (as a string) into typed tensors.
//
// Arguments:
-// resource: Should be from a scalar `Variable` node.
-// limit: If incrementing ref would bring it above limit, instead generates an
-// 'OutOfRange' error.
-//
-//
-// Returns A copy of the input before increment. If nothing else modifies the
-// input, the values produced will all be distinct.
-func ResourceCountUpTo(scope *Scope, resource tf.Output, limit int64, T tf.DataType) (output tf.Output) {
+// serialized: A vector containing a batch of binary serialized Example protos.
+// dense_defaults: A list of Tensors (some may be empty), whose length matches
+// the length of `dense_keys`. dense_defaults[j] provides default values
+// when the example's feature_map lacks dense_keys[j]. If an empty Tensor is
+// provided for dense_defaults[j], then the Feature dense_keys[j] is required.
+// The input type is inferred from dense_defaults[j], even when it's empty.
+// If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined,
+// then the shape of dense_defaults[j] must match that of dense_shapes[j].
+// If dense_shapes[j] has an undefined major dimension (variable strides dense
+// feature), dense_defaults[j] must contain a single element:
+// the padding element.
+// num_sparse: The number of sparse features to be parsed from the example. This
+// must match the lengths of `sparse_keys` and `sparse_types`.
+// sparse_keys: A list of `num_sparse` strings.
+// The keys expected in the Examples' features associated with sparse values.
+// dense_keys: The keys expected in the Examples' features associated with dense
+// values.
+// sparse_types: A list of `num_sparse` types; the data types of data in each
+// Feature given in sparse_keys.
+// Currently the ParseSingleExample op supports DT_FLOAT (FloatList),
+// DT_INT64 (Int64List), and DT_STRING (BytesList).
+// dense_shapes: The shapes of data in each Feature given in dense_keys.
+// The length of this list must match the length of `dense_keys`. The
+// number of elements in the Feature corresponding to dense_keys[j] must
+// always equal dense_shapes[j].NumEntries(). If dense_shapes[j] ==
+// (D0, D1, ..., DN) then the shape of output Tensor dense_values[j]
+// will be (D0, D1, ..., DN): In the case dense_shapes[j] = (-1, D1,
+// ..., DN), the shape of the output Tensor dense_values[j] will be (M,
+// D1, .., DN), where M is the number of blocks of elements of length
+// D1 * .... * DN, in the input.
+func ParseSingleExample(scope *Scope, serialized tf.Output, dense_defaults []tf.Output, num_sparse int64, sparse_keys []string, dense_keys []string, sparse_types []tf.DataType, dense_shapes []tf.Shape) (sparse_indices []tf.Output, sparse_values []tf.Output, sparse_shapes []tf.Output, dense_values []tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"limit": limit, "T": T}
+ attrs := map[string]interface{}{"num_sparse": num_sparse, "sparse_keys": sparse_keys, "dense_keys": dense_keys, "sparse_types": sparse_types, "dense_shapes": dense_shapes}
opspec := tf.OpSpec{
- Type: "ResourceCountUpTo",
+ Type: "ParseSingleExample",
Input: []tf.Input{
- resource,
+ serialized, tf.OutputList(dense_defaults),
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ if scope.Err() != nil {
+ return
+ }
+ var idx int
+ var err error
+ if sparse_indices, idx, err = makeOutputList(op, idx, "sparse_indices"); err != nil {
+ scope.UpdateErr("ParseSingleExample", err)
+ return
+ }
+ if sparse_values, idx, err = makeOutputList(op, idx, "sparse_values"); err != nil {
+ scope.UpdateErr("ParseSingleExample", err)
+ return
+ }
+ if sparse_shapes, idx, err = makeOutputList(op, idx, "sparse_shapes"); err != nil {
+ scope.UpdateErr("ParseSingleExample", err)
+ return
+ }
+ if dense_values, idx, err = makeOutputList(op, idx, "dense_values"); err != nil {
+ scope.UpdateErr("ParseSingleExample", err)
+ return
+ }
+ return sparse_indices, sparse_values, sparse_shapes, dense_values
}
-// Looks up keys in a table, outputs the corresponding values.
-//
-// The tensor `keys` must of the same type as the keys of the table.
-// The output `values` is of the type of the table values.
-//
-// The scalar `default_value` is the value output for keys not present in the
-// table. It must also be of the same type as the table values.
-//
-// Arguments:
-// table_handle: Handle to the table.
-// keys: Any shape. Keys to look up.
-//
-//
-// Returns Same shape as `keys`. Values found in the table, or `default_values`
-// for missing keys.
-func LookupTableFindV2(scope *Scope, table_handle tf.Output, keys tf.Output, default_value tf.Output) (values tf.Output) {
+// Computes acos of x element-wise.
+func Acos(scope *Scope, x tf.Output) (y tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "LookupTableFindV2",
+ Type: "Acos",
Input: []tf.Input{
- table_handle, keys, default_value,
+ x,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// MatrixSolveAttr is an optional argument to MatrixSolve.
-type MatrixSolveAttr func(optionalAttr)
+// MaxPoolWithArgmaxAttr is an optional argument to MaxPoolWithArgmax.
+type MaxPoolWithArgmaxAttr func(optionalAttr)
-// MatrixSolveAdjoint sets the optional adjoint attribute to value.
-//
-// value: Boolean indicating whether to solve with `matrix` or its (block-wise)
-// adjoint.
-// If not specified, defaults to false
-func MatrixSolveAdjoint(value bool) MatrixSolveAttr {
+// MaxPoolWithArgmaxTargmax sets the optional Targmax attribute to value.
+// If not specified, defaults to DT_INT64
+func MaxPoolWithArgmaxTargmax(value tf.DataType) MaxPoolWithArgmaxAttr {
return func(m optionalAttr) {
- m["adjoint"] = value
+ m["Targmax"] = value
}
}
-// Solves systems of linear equations.
+// Performs max pooling on the input and outputs both max values and indices.
//
-// `Matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
-// form square matrices. `Rhs` is a tensor of shape `[..., M, K]`. The `output` is
-// a tensor shape `[..., M, K]`. If `adjoint` is `False` then each output matrix
-// satisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
-// If `adjoint` is `True` then each output matrix satisfies
-// `adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`.
+// The indices in `argmax` are flattened, so that a maximum value at position
+// `[b, y, x, c]` becomes flattened index
+// `((b * height + y) * width + x) * channels + c`.
+//
+// The indices returned are always in `[0, height) x [0, width)` before flattening,
+// even if padding is involved and the mathematically correct answer is outside
+// (either negative or too large). This is a bug, but fixing it is difficult to do
+// in a safe backwards compatible way, especially due to flattening.
//
// Arguments:
-// matrix: Shape is `[..., M, M]`.
-// rhs: Shape is `[..., M, K]`.
+// input: 4-D with shape `[batch, height, width, channels]`. Input to pool over.
+// ksize: The size of the window for each dimension of the input tensor.
+// strides: The stride of the sliding window for each dimension of the
+// input tensor.
+// padding: The type of padding algorithm to use.
//
-// Returns Shape is `[..., M, K]`.
-func MatrixSolve(scope *Scope, matrix tf.Output, rhs tf.Output, optional ...MatrixSolveAttr) (output tf.Output) {
+// Returns The max pooled output tensor. 4-D. The flattened indices of the max values chosen for each output.
+func MaxPoolWithArgmax(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolWithArgmaxAttr) (output tf.Output, argmax tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "MatrixSolve",
+ Type: "MaxPoolWithArgmax",
Input: []tf.Input{
- matrix, rhs,
+ input,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1)
}
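The flattening rule for `argmax` is easy to verify in plain Go; the shape and position below are made up:

```go
package main

import "fmt"

// flatten computes ((b*height + y)*width + x)*channels + c, the index
// stored in `argmax` for a max value found at position [b, y, x, c].
func flatten(b, y, x, c, height, width, channels int) int {
	return ((b*height+y)*width+x)*channels + c
}

func main() {
	// For a [batch=2, height=4, width=4, channels=3] input, the max at
	// [1, 2, 3, 1] flattens to ((1*4+2)*4+3)*3 + 1 = 82.
	fmt.Println(flatten(1, 2, 3, 1, 4, 4, 3)) // 82
}
```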
-// Transforms a Tensor into a serialized TensorProto proto.
+// Transforms a serialized tensorflow.TensorProto proto into a Tensor.
//
// Arguments:
-// tensor: A Tensor of type `T`.
+// serialized: A scalar string containing a serialized TensorProto proto.
+// out_type: The type of the serialized tensor. The provided type must match the
+// type of the serialized tensor and no implicit conversion will take place.
//
-// Returns A serialized TensorProto proto of the input tensor.
-func SerializeTensor(scope *Scope, tensor tf.Output) (serialized tf.Output) {
+// Returns A Tensor of type `out_type`.
+func ParseTensor(scope *Scope, serialized tf.Output, out_type tf.DataType) (output tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"out_type": out_type}
opspec := tf.OpSpec{
- Type: "SerializeTensor",
+ Type: "ParseTensor",
Input: []tf.Input{
- tensor,
+ serialized,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Creates a dataset that contains the unique elements of `input_dataset`.
-func UniqueDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
+// MapClearAttr is an optional argument to MapClear.
+type MapClearAttr func(optionalAttr)
+
+// MapClearCapacity sets the optional capacity attribute to value.
+// If not specified, defaults to 0
+//
+// REQUIRES: value >= 0
+func MapClearCapacity(value int64) MapClearAttr {
+ return func(m optionalAttr) {
+ m["capacity"] = value
+ }
+}
+
+// MapClearMemoryLimit sets the optional memory_limit attribute to value.
+// If not specified, defaults to 0
+//
+// REQUIRES: value >= 0
+func MapClearMemoryLimit(value int64) MapClearAttr {
+ return func(m optionalAttr) {
+ m["memory_limit"] = value
+ }
+}
+
+// MapClearContainer sets the optional container attribute to value.
+// If not specified, defaults to ""
+func MapClearContainer(value string) MapClearAttr {
+ return func(m optionalAttr) {
+ m["container"] = value
+ }
+}
+
+// MapClearSharedName sets the optional shared_name attribute to value.
+// If not specified, defaults to ""
+func MapClearSharedName(value string) MapClearAttr {
+ return func(m optionalAttr) {
+ m["shared_name"] = value
+ }
+}
+
+// Op removes all elements in the underlying container.
+//
+// Returns the created operation.
+func MapClear(scope *Scope, dtypes []tf.DataType, optional ...MapClearAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
+ attrs := map[string]interface{}{"dtypes": dtypes}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "UniqueDataset",
- Input: []tf.Input{
- input_dataset,
- },
+ Type: "MapClear",
+
Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
-// FusedBatchNormGradAttr is an optional argument to FusedBatchNormGrad.
-type FusedBatchNormGradAttr func(optionalAttr)
+// DecodeCSVAttr is an optional argument to DecodeCSV.
+type DecodeCSVAttr func(optionalAttr)
-// FusedBatchNormGradEpsilon sets the optional epsilon attribute to value.
+// DecodeCSVFieldDelim sets the optional field_delim attribute to value.
//
-// value: A small float number added to the variance of x.
-// If not specified, defaults to 0.0001
-func FusedBatchNormGradEpsilon(value float32) FusedBatchNormGradAttr {
+// value: char delimiter to separate fields in a record.
+// If not specified, defaults to ","
+func DecodeCSVFieldDelim(value string) DecodeCSVAttr {
return func(m optionalAttr) {
- m["epsilon"] = value
+ m["field_delim"] = value
}
}
-// FusedBatchNormGradDataFormat sets the optional data_format attribute to value.
+// DecodeCSVUseQuoteDelim sets the optional use_quote_delim attribute to value.
//
-// value: The data format for y_backprop, x, x_backprop.
-// Either "NHWC" (default) or "NCHW".
-// If not specified, defaults to "NHWC"
-func FusedBatchNormGradDataFormat(value string) FusedBatchNormGradAttr {
+// value: If false, treats double quotation marks as regular
+// characters inside of the string fields (ignoring RFC 4180, Section 2,
+// Bullet 5).
+// If not specified, defaults to true
+func DecodeCSVUseQuoteDelim(value bool) DecodeCSVAttr {
return func(m optionalAttr) {
- m["data_format"] = value
+ m["use_quote_delim"] = value
}
}
-// FusedBatchNormGradIsTraining sets the optional is_training attribute to value.
+// DecodeCSVNaValue sets the optional na_value attribute to value.
//
-// value: A bool value to indicate the operation is for training (default)
-// or inference.
-// If not specified, defaults to true
-func FusedBatchNormGradIsTraining(value bool) FusedBatchNormGradAttr {
+// value: Additional string to recognize as NA/NaN.
+// If not specified, defaults to ""
+func DecodeCSVNaValue(value string) DecodeCSVAttr {
return func(m optionalAttr) {
- m["is_training"] = value
+ m["na_value"] = value
}
}
-// Gradient for batch normalization.
+// Convert CSV records to tensors. Each column maps to one tensor.
//
-// Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW".
-// The size of 1D Tensors matches the dimension C of the 4D Tensors.
+// RFC 4180 format is expected for the CSV records.
+// (https://tools.ietf.org/html/rfc4180)
+// Note that we allow leading and trailing spaces with int or float fields.
//
// Arguments:
-// y_backprop: A 4D Tensor for the gradient with respect to y.
-// x: A 4D Tensor for input data.
-// scale: A 1D Tensor for scaling factor, to scale the normalized x.
-// reserve_space_1: When is_training is True, a 1D Tensor for the computed batch
-// mean to be reused in gradient computation. When is_training is
-// False, a 1D Tensor for the population mean to be reused in both
-// 1st and 2nd order gradient computation.
-// reserve_space_2: When is_training is True, a 1D Tensor for the computed batch
-// variance (inverted variance in the cuDNN case) to be reused in
-// gradient computation. When is_training is False, a 1D Tensor
-// for the population variance to be reused in both 1st and 2nd
-// order gradient computation.
+// records: Each string is a record/row in the csv and all records should have
+// the same format.
+// record_defaults: One tensor per column of the input record, with either a
+// scalar default value for that column or empty if the column is required.
//
-// Returns A 4D Tensor for the gradient with respect to x.A 1D Tensor for the gradient with respect to scale.A 1D Tensor for the gradient with respect to offset.Unused placeholder to match the mean input in FusedBatchNorm.Unused placeholder to match the variance input
-// in FusedBatchNorm.
-func FusedBatchNormGrad(scope *Scope, y_backprop tf.Output, x tf.Output, scale tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output, optional ...FusedBatchNormGradAttr) (x_backprop tf.Output, scale_backprop tf.Output, offset_backprop tf.Output, reserve_space_3 tf.Output, reserve_space_4 tf.Output) {
+// Returns Each tensor will have the same shape as records.
+func DecodeCSV(scope *Scope, records tf.Output, record_defaults []tf.Output, optional ...DecodeCSVAttr) (output []tf.Output) {
if scope.Err() != nil {
return
}
@@ -15589,426 +15696,536 @@ func FusedBatchNormGrad(scope *Scope, y_backprop tf.Output, x tf.Output, scale t
a(attrs)
}
opspec := tf.OpSpec{
- Type: "FusedBatchNormGrad",
+ Type: "DecodeCSV",
Input: []tf.Input{
- y_backprop, x, scale, reserve_space_1, reserve_space_2,
+ records, tf.OutputList(record_defaults),
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
-}
-
-// Computes rectified linear: `max(features, 0)`.
-func Relu(scope *Scope, features tf.Output) (activations tf.Output) {
if scope.Err() != nil {
return
}
- opspec := tf.OpSpec{
- Type: "Relu",
- Input: []tf.Input{
- features,
- },
+ var idx int
+ var err error
+ if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
+ scope.UpdateErr("DecodeCSV", err)
+ return
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return output
}
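A sketch of decoding two-column records, assuming the standard binding imports; one default is supplied per column, so the missing int field in the last record falls back to its default:

```go
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	records := op.Const(s, []string{"1,alpha", "2,beta", ",gamma"})
	// One default per column: int32 0 for column 0, "" for column 1.
	defaults := []tf.Output{
		op.Const(s, []int32{0}),
		op.Const(s, []string{""}),
	}
	cols := op.DecodeCSV(s, records, defaults)

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()
	res, err := sess.Run(nil, cols, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(res[0].Value()) // [1 2 0]  (missing field -> default)
	fmt.Println(res[1].Value()) // [alpha beta gamma]
}
```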
-// L2 Loss.
+// Returns the rank of a tensor.
//
-// Computes half the L2 norm of a tensor without the `sqrt`:
+// This operation returns an integer representing the rank of `input`.
//
-// output = sum(t ** 2) / 2
+// For example:
//
-// Arguments:
-// t: Typically 2-D, but may have any dimensions.
+// ```
+// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
+// # shape of tensor 't' is [2, 2, 3]
+// rank(t) ==> 3
+// ```
//
-// Returns 0-D.
-func L2Loss(scope *Scope, t tf.Output) (output tf.Output) {
+// **Note**: The rank of a tensor is not the same as the rank of a matrix. The rank
+// of a tensor is the number of indices required to uniquely select each element
+// of the tensor. Rank is also known as "order", "degree", or "ndims."
+func Rank(scope *Scope, input tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "L2Loss",
+ Type: "Rank",
Input: []tf.Input{
- t,
+ input,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// ShapeAttr is an optional argument to Shape.
-type ShapeAttr func(optionalAttr)
-
-// ShapeOutType sets the optional out_type attribute to value.
-// If not specified, defaults to DT_INT32
-func ShapeOutType(value tf.DataType) ShapeAttr {
- return func(m optionalAttr) {
- m["out_type"] = value
- }
-}
-
-// Returns the shape of a tensor.
-//
-// This operation returns a 1-D integer tensor representing the shape of `input`.
-//
-// For example:
-//
-// ```
-// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
-// shape(t) ==> [2, 2, 3]
-// ```
-func Shape(scope *Scope, input tf.Output, optional ...ShapeAttr) (output tf.Output) {
+// Output a fact about factorials.
+func Fact(scope *Scope) (fact tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "Shape",
- Input: []tf.Input{
- input,
- },
- Attrs: attrs,
+ Type: "Fact",
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Computes softmax cross entropy cost and gradients to backpropagate.
-//
-// Inputs are the logits, not probabilities.
+// Makes its input available to the next iteration.
//
// Arguments:
-// features: batch_size x num_classes matrix
-// labels: batch_size x num_classes matrix
-// The caller must ensure that each batch of labels represents a valid
-// probability distribution.
+// data: The tensor to be made available to the next iteration.
//
-// Returns Per example loss (batch_size vector).backpropagated gradients (batch_size x num_classes matrix).
-func SoftmaxCrossEntropyWithLogits(scope *Scope, features tf.Output, labels tf.Output) (loss tf.Output, backprop tf.Output) {
+// Returns The same tensor as `data`.
+func NextIteration(scope *Scope, data tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "SoftmaxCrossEntropyWithLogits",
+ Type: "NextIteration",
Input: []tf.Input{
- features, labels,
+ data,
},
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1)
+ return op.Output(0)
}
-// Returns x - y element-wise.
+// Creates a dataset that skips `count` elements from the `input_dataset`.
//
-// *NOTE*: `Subtract` supports broadcasting. More about broadcasting
-// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-func Sub(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
+// Arguments:
+//
+// count: A scalar representing the number of elements from the `input_dataset`
+// that should be skipped. If count is -1, skips everything.
+//
+//
+func SkipDataset(scope *Scope, input_dataset tf.Output, count tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
opspec := tf.OpSpec{
- Type: "Sub",
+ Type: "SkipDataset",
Input: []tf.Input{
- x, y,
+ input_dataset, count,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Returns a copy of the input tensor.
-func Snapshot(scope *Scope, input tf.Output) (output tf.Output) {
+// Computes hyperbolic tangent of `x` element-wise.
+func Tanh(scope *Scope, x tf.Output) (y tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Snapshot",
+ Type: "Tanh",
Input: []tf.Input{
- input,
+ x,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Get the value of the tensor specified by its handle.
+// Computes the maximum along segments of a tensor.
+//
+// Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
+// segments.
+//
+// Computes a tensor such that
+// \\(output_i = \max_j(data_j)\\) where `max` is over `j` such
+// that `segment_ids[j] == i`.
+//
+// If the max is empty for a given segment ID `i`, `output[i] = 0`.
+//
+// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentMax.png" alt>
+// </div>
//
// Arguments:
-// handle: The handle for a tensor stored in the session state.
-// dtype: The type of the output value.
//
-// Returns The tensor for the given handle.
-func GetSessionTensor(scope *Scope, handle tf.Output, dtype tf.DataType) (value tf.Output) {
+// segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s
+// first dimension. Values should be sorted and can be repeated.
+//
+// Returns Has same shape as data, except for dimension 0 which
+// has size `k`, the number of segments.
+func SegmentMax(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dtype": dtype}
opspec := tf.OpSpec{
- Type: "GetSessionTensor",
+ Type: "SegmentMax",
Input: []tf.Input{
- handle,
+ data, segment_ids,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
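// A worked illustration of SegmentMax (values invented for this example):
// rows whose segment_ids match are reduced with an element-wise max.
//
// ```
// c = [[1, 2, 3, 4], [4, 3, 2, 1], [5, 6, 7, 8]]
// segment_ids = [0, 0, 1]
// segment_max(c, segment_ids) ==> [[4, 3, 3, 4], [5, 6, 7, 8]]
// ```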
-// ResourceApplyProximalGradientDescentAttr is an optional argument to ResourceApplyProximalGradientDescent.
-type ResourceApplyProximalGradientDescentAttr func(optionalAttr)
+// AvgPoolGradAttr is an optional argument to AvgPoolGrad.
+type AvgPoolGradAttr func(optionalAttr)
-// ResourceApplyProximalGradientDescentUseLocking sets the optional use_locking attribute to value.
+// AvgPoolGradDataFormat sets the optional data_format attribute to value.
//
-// value: If True, the subtraction will be protected by a lock;
-// otherwise the behavior is undefined, but may exhibit less contention.
-// If not specified, defaults to false
-func ResourceApplyProximalGradientDescentUseLocking(value bool) ResourceApplyProximalGradientDescentAttr {
+// value: Specify the data format of the input and output data. With the
+// default format "NHWC", the data is stored in the order of:
+// [batch, in_height, in_width, in_channels].
+// Alternatively, the format could be "NCHW", the data storage order of:
+// [batch, in_channels, in_height, in_width].
+// If not specified, defaults to "NHWC"
+func AvgPoolGradDataFormat(value string) AvgPoolGradAttr {
return func(m optionalAttr) {
- m["use_locking"] = value
+ m["data_format"] = value
}
}
-// Update '*var' as FOBOS algorithm with fixed learning rate.
-//
-// prox_v = var - alpha * delta
-// var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
+// Computes gradients of the average pooling function.
//
// Arguments:
-// var_: Should be from a Variable().
-// alpha: Scaling factor. Must be a scalar.
-// l1: L1 regularization. Must be a scalar.
-// l2: L2 regularization. Must be a scalar.
-// delta: The change.
+// orig_input_shape: 1-D. Shape of the original input to `avg_pool`.
+// grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t.
+// the output of `avg_pool`.
+// ksize: The size of the sliding window for each dimension of the input.
+// strides: The stride of the sliding window for each dimension of the input.
+// padding: The type of padding algorithm to use.
//
-// Returns the created operation.
-func ResourceApplyProximalGradientDescent(scope *Scope, var_ tf.Output, alpha tf.Output, l1 tf.Output, l2 tf.Output, delta tf.Output, optional ...ResourceApplyProximalGradientDescentAttr) (o *tf.Operation) {
+// Returns 4-D. Gradients w.r.t. the input of `avg_pool`.
+func AvgPoolGrad(scope *Scope, orig_input_shape tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPoolGradAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "ResourceApplyProximalGradientDescent",
+ Type: "AvgPoolGrad",
Input: []tf.Input{
- var_, alpha, l1, l2, delta,
+ orig_input_shape, grad,
},
Attrs: attrs,
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// 2D fast Fourier transform.
-//
-// Computes the 2-dimensional discrete Fourier transform over the inner-most
-// 2 dimensions of `input`.
+// StageClearAttr is an optional argument to StageClear.
+type StageClearAttr func(optionalAttr)
+
+// StageClearCapacity sets the optional capacity attribute to value.
+// If not specified, defaults to 0
//
-// Arguments:
-// input: A complex64 tensor.
+// REQUIRES: value >= 0
+func StageClearCapacity(value int64) StageClearAttr {
+ return func(m optionalAttr) {
+ m["capacity"] = value
+ }
+}
+
+// StageClearMemoryLimit sets the optional memory_limit attribute to value.
+// If not specified, defaults to 0
//
-// Returns A complex64 tensor of the same shape as `input`. The inner-most 2
-// dimensions of `input` are replaced with their 2D Fourier transform.
+// REQUIRES: value >= 0
+func StageClearMemoryLimit(value int64) StageClearAttr {
+ return func(m optionalAttr) {
+ m["memory_limit"] = value
+ }
+}
+
+// StageClearContainer sets the optional container attribute to value.
+// If not specified, defaults to ""
+func StageClearContainer(value string) StageClearAttr {
+ return func(m optionalAttr) {
+ m["container"] = value
+ }
+}
+
+// StageClearSharedName sets the optional shared_name attribute to value.
+// If not specified, defaults to ""
+func StageClearSharedName(value string) StageClearAttr {
+ return func(m optionalAttr) {
+ m["shared_name"] = value
+ }
+}
+
+// Op removes all elements in the underlying container.
//
-// @compatibility(numpy)
-// Equivalent to np.fft.fft2
-// @end_compatibility
-func FFT2D(scope *Scope, input tf.Output) (output tf.Output) {
+// Returns the created operation.
+func StageClear(scope *Scope, dtypes []tf.DataType, optional ...StageClearAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"dtypes": dtypes}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "FFT2D",
- Input: []tf.Input{
- input,
- },
+ Type: "StageClear",
+
+ Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
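// The *Attr setters above follow the functional-options idiom used throughout
// this package: each returns a closure that fills one attribute map entry, and
// any number of them can be passed as trailing arguments. A hypothetical call,
// given a Scope `s` (capacity and name invented):
//
// ```
// clear := op.StageClear(s, []tf.DataType{tf.Float},
// 	op.StageClearCapacity(10),
// 	op.StageClearSharedName("my_stage"))
// ```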
-// Creates a tensor filled with a scalar value.
+// ComputeAccidentalHitsAttr is an optional argument to ComputeAccidentalHits.
+type ComputeAccidentalHitsAttr func(optionalAttr)
+
+// ComputeAccidentalHitsSeed sets the optional seed attribute to value.
//
-// This operation creates a tensor of shape `dims` and fills it with `value`.
+// value: If either seed or seed2 are set to be non-zero, the random number
+// generator is seeded by the given seed. Otherwise, it is seeded by a
+// random seed.
+// If not specified, defaults to 0
+func ComputeAccidentalHitsSeed(value int64) ComputeAccidentalHitsAttr {
+ return func(m optionalAttr) {
+ m["seed"] = value
+ }
+}
+
+// ComputeAccidentalHitsSeed2 sets the optional seed2 attribute to value.
//
-// For example:
+// value: A second seed to avoid seed collision.
+// If not specified, defaults to 0
+func ComputeAccidentalHitsSeed2(value int64) ComputeAccidentalHitsAttr {
+ return func(m optionalAttr) {
+ m["seed2"] = value
+ }
+}
+
+// Computes the ids of the positions in sampled_candidates that match true_labels.
//
-// ```
-// # Output tensor has shape [2, 3].
-// fill([2, 3], 9) ==> [[9, 9, 9]
-// [9, 9, 9]]
-// ```
+// When doing log-odds NCE, the result of this op should be passed through a
+// SparseToDense op, then added to the logits of the sampled candidates. This has
+// the effect of 'removing' the sampled labels that match the true labels by
+// making the classifier sure that they are sampled labels.
//
// Arguments:
-// dims: 1-D. Represents the shape of the output tensor.
-// value: 0-D (scalar). Value to fill the returned tensor.
+// true_classes: The true_classes output of UnpackSparseLabels.
+// sampled_candidates: The sampled_candidates output of CandidateSampler.
+// num_true: Number of true labels per context.
//
-// @compatibility(numpy)
-// Equivalent to np.full
-// @end_compatibility
-func Fill(scope *Scope, dims tf.Output, value tf.Output) (output tf.Output) {
+// Returns A vector of indices corresponding to rows of true_candidates; a
+// vector of IDs of positions in sampled_candidates that match a true_label for
+// the row with the corresponding index in indices; and a vector of the same
+// length as indices and ids, in which each element is -FLOAT_MAX.
+func ComputeAccidentalHits(scope *Scope, true_classes tf.Output, sampled_candidates tf.Output, num_true int64, optional ...ComputeAccidentalHitsAttr) (indices tf.Output, ids tf.Output, weights tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"num_true": num_true}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "Fill",
+ Type: "ComputeAccidentalHits",
Input: []tf.Input{
- dims, value,
+ true_classes, sampled_candidates,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1), op.Output(2)
}
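// A small worked example (numbers invented): with `true_classes = [[1, 2]]`,
// `sampled_candidates = [0, 1, 3]` and `num_true = 2`, candidate value 1 sits
// at position 1 of sampled_candidates and matches a true label of row 0, so
// the op would return `indices = [0]`, `ids = [1]`, and
// `weights = [-FLOAT_MAX]`.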
-// Inverse 2D fast Fourier transform.
-//
-// Computes the inverse 2-dimensional discrete Fourier transform over the
-// inner-most 2 dimensions of `input`.
-//
-// Arguments:
-// input: A complex64 tensor.
-//
-// Returns A complex64 tensor of the same shape as `input`. The inner-most 2
-// dimensions of `input` are replaced with their inverse 2D Fourier transform.
+// Computes sigmoid of `x` element-wise.
//
-// @compatibility(numpy)
-// Equivalent to np.fft.ifft2
-// @end_compatibility
-func IFFT2D(scope *Scope, input tf.Output) (output tf.Output) {
+// Specifically, `y = 1 / (1 + exp(-x))`.
+func Sigmoid(scope *Scope, x tf.Output) (y tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "IFFT2D",
+ Type: "Sigmoid",
Input: []tf.Input{
- input,
+ x,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// TensorArrayV3Attr is an optional argument to TensorArrayV3.
-type TensorArrayV3Attr func(optionalAttr)
+// RandomStandardNormalAttr is an optional argument to RandomStandardNormal.
+type RandomStandardNormalAttr func(optionalAttr)
-// TensorArrayV3ElementShape sets the optional element_shape attribute to value.
+// RandomStandardNormalSeed sets the optional seed attribute to value.
//
-// value: The expected shape of an element, if known. Used to
-// validate the shapes of TensorArray elements. If this shape is not
-// fully specified, gathering zero-size TensorArrays is an error.
-// If not specified, defaults to <unknown_rank:true >
-func TensorArrayV3ElementShape(value tf.Shape) TensorArrayV3Attr {
+// value: If either `seed` or `seed2` are set to be non-zero, the random number
+// generator is seeded by the given seed. Otherwise, it is seeded by a
+// random seed.
+// If not specified, defaults to 0
+func RandomStandardNormalSeed(value int64) RandomStandardNormalAttr {
return func(m optionalAttr) {
- m["element_shape"] = value
+ m["seed"] = value
}
}
-// TensorArrayV3DynamicSize sets the optional dynamic_size attribute to value.
+// RandomStandardNormalSeed2 sets the optional seed2 attribute to value.
//
-// value: A boolean that determines whether writes to the TensorArray
-// are allowed to grow the size. By default, this is not allowed.
-// If not specified, defaults to false
-func TensorArrayV3DynamicSize(value bool) TensorArrayV3Attr {
+// value: A second seed to avoid seed collision.
+// If not specified, defaults to 0
+func RandomStandardNormalSeed2(value int64) RandomStandardNormalAttr {
return func(m optionalAttr) {
- m["dynamic_size"] = value
+ m["seed2"] = value
}
}
-// TensorArrayV3ClearAfterRead sets the optional clear_after_read attribute to value.
+// Outputs random values from a normal distribution.
//
-// value: If true (default), Tensors in the TensorArray are cleared
-// after being read. This disables multiple read semantics but allows early
-// release of memory.
-// If not specified, defaults to true
-func TensorArrayV3ClearAfterRead(value bool) TensorArrayV3Attr {
+// The generated values will have mean 0 and standard deviation 1.
+//
+// Arguments:
+// shape: The shape of the output tensor.
+// dtype: The type of the output.
+//
+// Returns A tensor of the specified shape filled with random normal values.
+func RandomStandardNormal(scope *Scope, shape tf.Output, dtype tf.DataType, optional ...RandomStandardNormalAttr) (output tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{"dtype": dtype}
+ for _, a := range optional {
+ a(attrs)
+ }
+ opspec := tf.OpSpec{
+ Type: "RandomStandardNormal",
+ Input: []tf.Input{
+ shape,
+ },
+ Attrs: attrs,
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
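// A minimal usage sketch (shape and seed invented): fixing `seed` makes the
// values reproducible, per the attribute docs above.
//
// ```
// s := op.NewScope()
// shape := op.Const(s, []int32{2, 3})
// noise := op.RandomStandardNormal(s, shape, tf.Float,
// 	op.RandomStandardNormalSeed(42))
// ```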
+
+// FusedBatchNormAttr is an optional argument to FusedBatchNorm.
+type FusedBatchNormAttr func(optionalAttr)
+
+// FusedBatchNormEpsilon sets the optional epsilon attribute to value.
+//
+// value: A small float number added to the variance of x.
+// If not specified, defaults to 0.0001
+func FusedBatchNormEpsilon(value float32) FusedBatchNormAttr {
return func(m optionalAttr) {
- m["clear_after_read"] = value
+ m["epsilon"] = value
}
}
-// TensorArrayV3IdenticalElementShapes sets the optional identical_element_shapes attribute to value.
+// FusedBatchNormDataFormat sets the optional data_format attribute to value.
//
-// value: If true (default is false), then all
-// elements in the TensorArray will be expected to have have identical shapes.
-// This allows certain behaviors, like dynamically checking for
-// consistent shapes on write, and being able to fill in properly
-// shaped zero tensors on stack -- even if the element_shape attribute
-// is not fully defined.
-// If not specified, defaults to false
-func TensorArrayV3IdenticalElementShapes(value bool) TensorArrayV3Attr {
+// value: The data format for x and y. Either "NHWC" (default) or "NCHW".
+// If not specified, defaults to "NHWC"
+func FusedBatchNormDataFormat(value string) FusedBatchNormAttr {
return func(m optionalAttr) {
- m["identical_element_shapes"] = value
+ m["data_format"] = value
}
}
-// TensorArrayV3TensorArrayName sets the optional tensor_array_name attribute to value.
+// FusedBatchNormIsTraining sets the optional is_training attribute to value.
//
-// value: Overrides the name used for the temporary tensor_array
-// resource. Default value is the name of the 'TensorArray' op (which
-// is guaranteed unique).
-// If not specified, defaults to ""
-func TensorArrayV3TensorArrayName(value string) TensorArrayV3Attr {
+// value: A bool value to indicate the operation is for training (default)
+// or inference.
+// If not specified, defaults to true
+func FusedBatchNormIsTraining(value bool) FusedBatchNormAttr {
return func(m optionalAttr) {
- m["tensor_array_name"] = value
+ m["is_training"] = value
}
}
-// An array of Tensors of given size.
+// Batch normalization.
//
-// Write data via Write and read via Read or Pack.
+// Note that the sizes of 4D Tensors are defined by either "NHWC" or "NCHW".
+// The sizes of 1D Tensors match the dimension C of the 4D Tensors.
//
// Arguments:
-// size: The size of the array.
-// dtype: The type of the elements on the tensor_array.
+// x: A 4D Tensor for input data.
+// scale: A 1D Tensor for scaling factor, to scale the normalized x.
+// offset: A 1D Tensor for offset, to shift to the normalized x.
+// mean: A 1D Tensor for population mean. Used for inference only;
+// must be empty for training.
+// variance: A 1D Tensor for population variance. Used for inference only;
+// must be empty for training.
//
-// Returns The handle to the TensorArray.A scalar used to control gradient flow.
-func TensorArrayV3(scope *Scope, size tf.Output, dtype tf.DataType, optional ...TensorArrayV3Attr) (handle tf.Output, flow tf.Output) {
+// Returns A 4D Tensor for output data; a 1D Tensor for the computed batch
+// mean, to be used by TensorFlow to compute the running mean; a 1D Tensor for
+// the computed batch variance, to be used by TensorFlow to compute the running
+// variance; a 1D Tensor for the computed batch mean, to be reused in the
+// gradient computation; and a 1D Tensor for the computed batch variance
+// (inverted variance in the cuDNN case), to be reused in the gradient
+// computation.
+func FusedBatchNorm(scope *Scope, x tf.Output, scale tf.Output, offset tf.Output, mean tf.Output, variance tf.Output, optional ...FusedBatchNormAttr) (y tf.Output, batch_mean tf.Output, batch_variance tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dtype": dtype}
+ attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "TensorArrayV3",
+ Type: "FusedBatchNorm",
Input: []tf.Input{
- size,
+ x, scale, offset, mean, variance,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1)
+ return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
}
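// A caller-side sketch of FusedBatchNorm in training mode (placeholder shapes
// invented; note that `mean` and `variance` must be empty tensors when
// is_training is true, per the argument docs above):
//
// ```
// s := op.NewScope()
// x := op.Placeholder(s, tf.Float) // 4-D [batch, height, width, 3], fed at run time
// scale := op.Const(s, []float32{1, 1, 1})
// offset := op.Const(s, []float32{0, 0, 0})
// empty := op.Const(s, []float32{})
// y, batchMean, batchVar, _, _ := op.FusedBatchNorm(s, x, scale, offset, empty, empty)
// ```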
-// ResourceApplyGradientDescentAttr is an optional argument to ResourceApplyGradientDescent.
-type ResourceApplyGradientDescentAttr func(optionalAttr)
+// Computes tan of x element-wise.
+func Tan(scope *Scope, x tf.Output) (y tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "Tan",
+ Input: []tf.Input{
+ x,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
-// ResourceApplyGradientDescentUseLocking sets the optional use_locking attribute to value.
+// FusedBatchNormV2Attr is an optional argument to FusedBatchNormV2.
+type FusedBatchNormV2Attr func(optionalAttr)
+
+// FusedBatchNormV2Epsilon sets the optional epsilon attribute to value.
//
-// value: If `True`, the subtraction will be protected by a lock;
-// otherwise the behavior is undefined, but may exhibit less contention.
-// If not specified, defaults to false
-func ResourceApplyGradientDescentUseLocking(value bool) ResourceApplyGradientDescentAttr {
+// value: A small float number added to the variance of x.
+// If not specified, defaults to 0.0001
+func FusedBatchNormV2Epsilon(value float32) FusedBatchNormV2Attr {
return func(m optionalAttr) {
- m["use_locking"] = value
+ m["epsilon"] = value
}
}
-// Update '*var' by subtracting 'alpha' * 'delta' from it.
+// FusedBatchNormV2DataFormat sets the optional data_format attribute to value.
+//
+// value: The data format for x and y. Either "NHWC" (default) or "NCHW".
+// If not specified, defaults to "NHWC"
+func FusedBatchNormV2DataFormat(value string) FusedBatchNormV2Attr {
+ return func(m optionalAttr) {
+ m["data_format"] = value
+ }
+}
+
+// FusedBatchNormV2IsTraining sets the optional is_training attribute to value.
+//
+// value: A bool value to indicate the operation is for training (default)
+// or inference.
+// If not specified, defaults to true
+func FusedBatchNormV2IsTraining(value bool) FusedBatchNormV2Attr {
+ return func(m optionalAttr) {
+ m["is_training"] = value
+ }
+}
+
+// Batch normalization.
+//
+// Note that the sizes of 4D Tensors are defined by either "NHWC" or "NCHW".
+// The sizes of 1D Tensors match the dimension C of the 4D Tensors.
//
// Arguments:
-// var_: Should be from a Variable().
-// alpha: Scaling factor. Must be a scalar.
-// delta: The change.
+// x: A 4D Tensor for input data.
+// scale: A 1D Tensor for scaling factor, to scale the normalized x.
+// offset: A 1D Tensor for offset, to shift to the normalized x.
+// mean: A 1D Tensor for population mean. Used for inference only;
+// must be empty for training.
+// variance: A 1D Tensor for population variance. Used for inference only;
+// must be empty for training.
//
-// Returns the created operation.
-func ResourceApplyGradientDescent(scope *Scope, var_ tf.Output, alpha tf.Output, delta tf.Output, optional ...ResourceApplyGradientDescentAttr) (o *tf.Operation) {
+// Returns A 4D Tensor for output data; a 1D Tensor for the computed batch
+// mean, to be used by TensorFlow to compute the running mean; a 1D Tensor for
+// the computed batch variance, to be used by TensorFlow to compute the running
+// variance; a 1D Tensor for the computed batch mean, to be reused in the
+// gradient computation; and a 1D Tensor for the computed batch variance
+// (inverted variance in the cuDNN case), to be reused in the gradient
+// computation.
+func FusedBatchNormV2(scope *Scope, x tf.Output, scale tf.Output, offset tf.Output, mean tf.Output, variance tf.Output, optional ...FusedBatchNormV2Attr) (y tf.Output, batch_mean tf.Output, batch_variance tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output) {
if scope.Err() != nil {
return
}
@@ -16017,13 +16234,14 @@ func ResourceApplyGradientDescent(scope *Scope, var_ tf.Output, alpha tf.Output,
a(attrs)
}
opspec := tf.OpSpec{
- Type: "ResourceApplyGradientDescent",
+ Type: "FusedBatchNormV2",
Input: []tf.Input{
- var_, alpha, delta,
+ x, scale, offset, mean, variance,
},
Attrs: attrs,
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
}
// MultinomialAttr is an optional argument to Multinomial.
@@ -16086,35 +16304,124 @@ func Multinomial(scope *Scope, logits tf.Output, num_samples tf.Output, optional
return op.Output(0)
}
-// ResourceSparseApplyAdagradDAAttr is an optional argument to ResourceSparseApplyAdagradDA.
-type ResourceSparseApplyAdagradDAAttr func(optionalAttr)
+// EncodeJpegAttr is an optional argument to EncodeJpeg.
+type EncodeJpegAttr func(optionalAttr)
-// ResourceSparseApplyAdagradDAUseLocking sets the optional use_locking attribute to value.
+// EncodeJpegFormat sets the optional format attribute to value.
//
-// value: If True, updating of the var and accum tensors will be protected by
-// a lock; otherwise the behavior is undefined, but may exhibit less contention.
+// value: Per pixel image format.
+// If not specified, defaults to ""
+func EncodeJpegFormat(value string) EncodeJpegAttr {
+ return func(m optionalAttr) {
+ m["format"] = value
+ }
+}
+
+// EncodeJpegQuality sets the optional quality attribute to value.
+//
+// value: Quality of the compression from 0 to 100 (higher is better and slower).
+// If not specified, defaults to 95
+func EncodeJpegQuality(value int64) EncodeJpegAttr {
+ return func(m optionalAttr) {
+ m["quality"] = value
+ }
+}
+
+// EncodeJpegProgressive sets the optional progressive attribute to value.
+//
+// value: If True, create a JPEG that loads progressively (coarse to fine).
// If not specified, defaults to false
-func ResourceSparseApplyAdagradDAUseLocking(value bool) ResourceSparseApplyAdagradDAAttr {
+func EncodeJpegProgressive(value bool) EncodeJpegAttr {
return func(m optionalAttr) {
- m["use_locking"] = value
+ m["progressive"] = value
}
}
-// Update entries in '*var' and '*accum' according to the proximal adagrad scheme.
+// EncodeJpegOptimizeSize sets the optional optimize_size attribute to value.
+//
+// value: If True, spend CPU/RAM to reduce size with no quality change.
+// If not specified, defaults to false
+func EncodeJpegOptimizeSize(value bool) EncodeJpegAttr {
+ return func(m optionalAttr) {
+ m["optimize_size"] = value
+ }
+}
+
+// EncodeJpegChromaDownsampling sets the optional chroma_downsampling attribute to value.
+//
+// value: See http://en.wikipedia.org/wiki/Chroma_subsampling.
+// If not specified, defaults to true
+func EncodeJpegChromaDownsampling(value bool) EncodeJpegAttr {
+ return func(m optionalAttr) {
+ m["chroma_downsampling"] = value
+ }
+}
+
+// EncodeJpegDensityUnit sets the optional density_unit attribute to value.
+//
+// value: Unit used to specify `x_density` and `y_density`:
+// pixels per inch (`'in'`) or centimeter (`'cm'`).
+// If not specified, defaults to "in"
+func EncodeJpegDensityUnit(value string) EncodeJpegAttr {
+ return func(m optionalAttr) {
+ m["density_unit"] = value
+ }
+}
+
+// EncodeJpegXDensity sets the optional x_density attribute to value.
+//
+// value: Horizontal pixels per density unit.
+// If not specified, defaults to 300
+func EncodeJpegXDensity(value int64) EncodeJpegAttr {
+ return func(m optionalAttr) {
+ m["x_density"] = value
+ }
+}
+
+// EncodeJpegYDensity sets the optional y_density attribute to value.
+//
+// value: Vertical pixels per density unit.
+// If not specified, defaults to 300
+func EncodeJpegYDensity(value int64) EncodeJpegAttr {
+ return func(m optionalAttr) {
+ m["y_density"] = value
+ }
+}
+
+// EncodeJpegXmpMetadata sets the optional xmp_metadata attribute to value.
+//
+// value: If not empty, embed this XMP metadata in the image header.
+// If not specified, defaults to ""
+func EncodeJpegXmpMetadata(value string) EncodeJpegAttr {
+ return func(m optionalAttr) {
+ m["xmp_metadata"] = value
+ }
+}
+
+// JPEG-encode an image.
+//
+// `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`.
+//
+// The attr `format` can be used to override the color format of the encoded
+// output. Values can be:
+//
+// * `''`: Use a default format based on the number of channels in the image.
+// * `grayscale`: Output a grayscale JPEG image. The `channels` dimension
+// of `image` must be 1.
+// * `rgb`: Output an RGB JPEG image. The `channels` dimension
+// of `image` must be 3.
+//
+// If `format` is not specified or is the empty string, a default format is
+// picked based on the number of channels in `image`:
+//
+// * 1: Output a grayscale image.
+// * 3: Output an RGB image.
//
// Arguments:
-// var_: Should be from a Variable().
-// gradient_accumulator: Should be from a Variable().
-// gradient_squared_accumulator: Should be from a Variable().
-// grad: The gradient.
-// indices: A vector of indices into the first dimension of var and accum.
-// lr: Learning rate. Must be a scalar.
-// l1: L1 regularization. Must be a scalar.
-// l2: L2 regularization. Must be a scalar.
-// global_step: Training step number. Must be a scalar.
+// image: 3-D with shape `[height, width, channels]`.
//
-// Returns the created operation.
-func ResourceSparseApplyAdagradDA(scope *Scope, var_ tf.Output, gradient_accumulator tf.Output, gradient_squared_accumulator tf.Output, grad tf.Output, indices tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, global_step tf.Output, optional ...ResourceSparseApplyAdagradDAAttr) (o *tf.Operation) {
+// Returns 0-D. JPEG-encoded image.
+func EncodeJpeg(scope *Scope, image tf.Output, optional ...EncodeJpegAttr) (contents tf.Output) {
if scope.Err() != nil {
return
}
@@ -16123,81 +16430,135 @@ func ResourceSparseApplyAdagradDA(scope *Scope, var_ tf.Output, gradient_accumul
a(attrs)
}
opspec := tf.OpSpec{
- Type: "ResourceSparseApplyAdagradDA",
+ Type: "EncodeJpeg",
Input: []tf.Input{
- var_, gradient_accumulator, gradient_squared_accumulator, grad, indices, lr, l1, l2, global_step,
+ image,
},
Attrs: attrs,
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
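// A minimal usage sketch of EncodeJpeg with a few of the options above, given
// a Scope `s` and a 3-D uint8 `image` (the quality and metadata values are
// invented):
//
// ```
// jpeg := op.EncodeJpeg(s, image,
// 	op.EncodeJpegQuality(90),
// 	op.EncodeJpegProgressive(true),
// 	op.EncodeJpegXmpMetadata("<x:xmpmeta/>"))
// // jpeg is a 0-D string tensor holding the encoded bytes.
// ```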
-// Computes softmax cross entropy cost and gradients to backpropagate.
-//
-// Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept
-// a matrix of label probabilities, but rather a single label per row
-// of features. This label is considered to have probability 1.0 for the
-// given row.
+// MaxPoolGradAttr is an optional argument to MaxPoolGrad.
+type MaxPoolGradAttr func(optionalAttr)
+
+// MaxPoolGradDataFormat sets the optional data_format attribute to value.
//
-// Inputs are the logits, not probabilities.
+// value: Specify the data format of the input and output data. With the
+// default format "NHWC", the data is stored in the order of:
+// [batch, in_height, in_width, in_channels].
+// Alternatively, the format could be "NCHW", the data storage order of:
+// [batch, in_channels, in_height, in_width].
+// If not specified, defaults to "NHWC"
+func MaxPoolGradDataFormat(value string) MaxPoolGradAttr {
+ return func(m optionalAttr) {
+ m["data_format"] = value
+ }
+}
+
+// Computes gradients of the maxpooling function.
//
// Arguments:
-// features: batch_size x num_classes matrix
-// labels: batch_size vector with values in [0, num_classes).
-// This is the label for the given minibatch entry.
+// orig_input: The original input tensor.
+// orig_output: The original output tensor.
+// grad: 4-D. Gradients w.r.t. the output of `max_pool`.
+// ksize: The size of the window for each dimension of the input tensor.
+// strides: The stride of the sliding window for each dimension of the
+// input tensor.
+// padding: The type of padding algorithm to use.
//
-// Returns Per example loss (batch_size vector).backpropagated gradients (batch_size x num_classes matrix).
-func SparseSoftmaxCrossEntropyWithLogits(scope *Scope, features tf.Output, labels tf.Output) (loss tf.Output, backprop tf.Output) {
+// Returns Gradients w.r.t. the input to `max_pool`.
+func MaxPoolGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolGradAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "SparseSoftmaxCrossEntropyWithLogits",
+ Type: "MaxPoolGrad",
Input: []tf.Input{
- features, labels,
+ orig_input, orig_output, grad,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1)
+ return op.Output(0)
}
-// Gradient op for `MirrorPad` op. This op folds a mirror-padded tensor.
-//
-// This operation folds the padded areas of `input` by `MirrorPad` according to the
-// `paddings` you specify. `paddings` must be the same as `paddings` argument
-// given to the corresponding `MirrorPad` op.
+// CropAndResizeAttr is an optional argument to CropAndResize.
+type CropAndResizeAttr func(optionalAttr)
+
+// CropAndResizeMethod sets the optional method attribute to value.
//
-// The folded size of each dimension D of the output is:
+// value: A string specifying the interpolation method. Only 'bilinear' is
+// supported for now.
+// If not specified, defaults to "bilinear"
+func CropAndResizeMethod(value string) CropAndResizeAttr {
+ return func(m optionalAttr) {
+ m["method"] = value
+ }
+}
+
+// CropAndResizeExtrapolationValue sets the optional extrapolation_value attribute to value.
//
-// `input.dim_size(D) - paddings(D, 0) - paddings(D, 1)`
+// value: Value used for extrapolation, when applicable.
+// If not specified, defaults to 0
+func CropAndResizeExtrapolationValue(value float32) CropAndResizeAttr {
+ return func(m optionalAttr) {
+ m["extrapolation_value"] = value
+ }
+}
+
+// Extracts crops from the input image tensor and bilinearly resizes them
//
-// For example:
+// (possibly with aspect ratio change) to a common output size specified by
+// `crop_size`. This is more general than the `crop_to_bounding_box` op, which
+// extracts a fixed-size slice from the input image and does not allow resizing
+// or aspect ratio change.
//
-// ```
-// # 't' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]].
-// # 'paddings' is [[0, 1]], [0, 1]].
-// # 'mode' is SYMMETRIC.
-// # rank of 't' is 2.
-// pad(t, paddings) ==> [[ 1, 5]
-// [11, 28]]
-// ```
+// Returns a tensor with `crops` from the input `image` at positions defined at the
+// bounding box locations in `boxes`. The cropped boxes are all resized (with
+// bilinear interpolation) to a fixed `size = [crop_height, crop_width]`. The
+// result is a 4-D tensor `[num_boxes, crop_height, crop_width, depth]`. The
+// resizing is corner aligned. In particular, if `boxes = [[0, 0, 1, 1]]`, the
+// method will give identical results to using `tf.image.resize_bilinear()`
+// with `align_corners=True`.
//
// Arguments:
-// input: The input tensor to be folded.
-// paddings: A two-column matrix specifying the padding sizes. The number of
-// rows must be the same as the rank of `input`.
-// mode: The mode used in the `MirrorPad` op.
+// image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
+// Both `image_height` and `image_width` need to be positive.
+// boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
+// specifies the coordinates of a box in the `box_ind[i]` image and is specified
+// in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
+// `y` is mapped to the image coordinate at `y * (image_height - 1)`, so that
+// the `[0, 1]` interval of normalized image height is mapped to
+// `[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in
+// which case the sampled crop is an up-down flipped version of the original
+// image. The width dimension is treated similarly. Normalized coordinates
+// outside the `[0, 1]` range are allowed, in which case we use
+// `extrapolation_value` to extrapolate the input image values.
+// box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
+// The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
+// crop_size: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All
+// cropped image patches are resized to this size. The aspect ratio of the image
+// content is not preserved. Both `crop_height` and `crop_width` need to be
+// positive.
//
-// Returns The folded tensor.
-func MirrorPadGrad(scope *Scope, input tf.Output, paddings tf.Output, mode string) (output tf.Output) {
+// Returns A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
+func CropAndResize(scope *Scope, image tf.Output, boxes tf.Output, box_ind tf.Output, crop_size tf.Output, optional ...CropAndResizeAttr) (crops tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"mode": mode}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "MirrorPadGrad",
+ Type: "CropAndResize",
Input: []tf.Input{
- input, paddings,
+ image, boxes, box_ind, crop_size,
},
Attrs: attrs,
}
@@ -16205,258 +16566,147 @@ func MirrorPadGrad(scope *Scope, input tf.Output, paddings tf.Output, mode strin
return op.Output(0)
}
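// A caller-side sketch of CropAndResize, given a Scope `s` and a 4-D `image`
// (box coordinates invented): one normalized box from batch image 0, resized
// to 24x24.
//
// ```
// boxes := op.Const(s, [][]float32{{0.1, 0.2, 0.9, 0.8}}) // [y1, x1, y2, x2]
// boxInd := op.Const(s, []int32{0})
// cropSize := op.Const(s, []int32{24, 24})
// crops := op.CropAndResize(s, image, boxes, boxInd, cropSize,
// 	op.CropAndResizeExtrapolationValue(0))
// // crops has shape [1, 24, 24, depth].
// ```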
-// Computes the inverse permutation of a tensor.
-//
-// This operation computes the inverse of an index permutation. It takes a 1-D
-// integer tensor `x`, which represents the indices of a zero-based array, and
-// swaps each value with its index position. In other words, for an output tensor
-// `y` and an input tensor `x`, this operation computes the following:
-//
-// `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`
-//
-// The values must include 0. There can be no duplicate values or negative values.
+// ResourceApplyPowerSignAttr is an optional argument to ResourceApplyPowerSign.
+type ResourceApplyPowerSignAttr func(optionalAttr)
+
+// ResourceApplyPowerSignUseLocking sets the optional use_locking attribute to value.
//
-// For example:
+// value: If `True`, updating of the var and m tensors is
+// protected by a lock; otherwise the behavior is undefined, but may exhibit less
+// contention.
+// If not specified, defaults to false
+func ResourceApplyPowerSignUseLocking(value bool) ResourceApplyPowerSignAttr {
+ return func(m optionalAttr) {
+ m["use_locking"] = value
+ }
+}
+
+// Update '*var' according to the PowerSign update.
//
-// ```
-// # tensor `x` is [3, 4, 0, 2, 1]
-// invert_permutation(x) ==> [2, 4, 3, 0, 1]
-// ```
+// m_t <- beta1 * m_{t-1} + (1 - beta1) * g
+// update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g
+// variable <- variable - lr_t * update
//
// Arguments:
-// x: 1-D.
+// var_: Should be from a Variable().
+// m: Should be from a Variable().
+// lr: Scaling factor. Must be a scalar.
+// logbase: Must be a scalar.
+// sign_decay: Must be a scalar.
+// beta: Must be a scalar.
+// grad: The gradient.
//
-// Returns 1-D.
-func InvertPermutation(scope *Scope, x tf.Output) (y tf.Output) {
+// Returns the created operation.
+func ResourceApplyPowerSign(scope *Scope, var_ tf.Output, m tf.Output, lr tf.Output, logbase tf.Output, sign_decay tf.Output, beta tf.Output, grad tf.Output, optional ...ResourceApplyPowerSignAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "InvertPermutation",
+ Type: "ResourceApplyPowerSign",
Input: []tf.Input{
- x,
+ var_, m, lr, logbase, sign_decay, beta, grad,
},
+ Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
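// Since sign(g) * sign(m_t) is ±1, the update factor reduces to
// exp(±logbase * sign_decay): gradients whose sign agrees with the moving
// average m_t are scaled up, and disagreeing ones are scaled down.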
-// Reverses specific dimensions of a tensor.
-//
-// Given a `tensor`, and a `bool` tensor `dims` representing the dimensions
-// of `tensor`, this operation reverses each dimension i of `tensor` where
-// `dims[i]` is `True`.
-//
-// `tensor` can have up to 8 dimensions. The number of dimensions
-// of `tensor` must equal the number of elements in `dims`. In other words:
-//
-// `rank(tensor) = size(dims)`
-//
-// For example:
-//
-// ```
-// # tensor 't' is [[[[ 0, 1, 2, 3],
-// # [ 4, 5, 6, 7],
-// # [ 8, 9, 10, 11]],
-// # [[12, 13, 14, 15],
-// # [16, 17, 18, 19],
-// # [20, 21, 22, 23]]]]
-// # tensor 't' shape is [1, 2, 3, 4]
-//
-// # 'dims' is [False, False, False, True]
-// reverse(t, dims) ==> [[[[ 3, 2, 1, 0],
-// [ 7, 6, 5, 4],
-// [ 11, 10, 9, 8]],
-// [[15, 14, 13, 12],
-// [19, 18, 17, 16],
-// [23, 22, 21, 20]]]]
-//
-// # 'dims' is [False, True, False, False]
-// reverse(t, dims) ==> [[[[12, 13, 14, 15],
-// [16, 17, 18, 19],
-// [20, 21, 22, 23]
-// [[ 0, 1, 2, 3],
-// [ 4, 5, 6, 7],
-// [ 8, 9, 10, 11]]]]
-//
-// # 'dims' is [False, False, True, False]
-// reverse(t, dims) ==> [[[[8, 9, 10, 11],
-// [4, 5, 6, 7],
-// [0, 1, 2, 3]]
-// [[20, 21, 22, 23],
-// [16, 17, 18, 19],
-// [12, 13, 14, 15]]]]
-// ```
-//
-// Arguments:
-// tensor: Up to 8-D.
-// dims: 1-D. The dimensions to reverse.
+// Deprecated. Disallowed in GraphDef version >= 2.
//
-// Returns The same shape as `tensor`.
-func Reverse(scope *Scope, tensor tf.Output, dims tf.Output) (output tf.Output) {
+// DEPRECATED at GraphDef version 2: Use AdjustContrastv2 instead
+func AdjustContrast(scope *Scope, images tf.Output, contrast_factor tf.Output, min_value tf.Output, max_value tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Reverse",
+ Type: "AdjustContrast",
Input: []tf.Input{
- tensor, dims,
+ images, contrast_factor, min_value, max_value,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Fills empty rows in the input 2-D `SparseTensor` with a default value.
-//
-// The input `SparseTensor` is represented via the tuple of inputs
-// (`indices`, `values`, `dense_shape`). The output `SparseTensor` has the
-// same `dense_shape` but with indices `output_indices` and values
-// `output_values`.
-//
-// This op inserts a single entry for every row that doesn't have any values.
-// The index is created as `[row, 0, ..., 0]` and the inserted value
-// is `default_value`.
-//
-// For example, suppose `sp_input` has shape `[5, 6]` and non-empty values:
-//
-// [0, 1]: a
-// [0, 3]: b
-// [2, 0]: c
-// [3, 1]: d
-//
-// Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values:
-//
-// [0, 1]: a
-// [0, 3]: b
-// [1, 0]: default_value
-// [2, 0]: c
-// [3, 1]: d
-// [4, 0]: default_value
-//
-// The output `SparseTensor` will be in row-major order and will have the
-// same shape as the input.
-//
-// This op also returns an indicator vector shaped `[dense_shape[0]]` such that
-//
-// empty_row_indicator[i] = True iff row i was an empty row.
-//
-// And a reverse index map vector shaped `[indices.shape[0]]` that is used during
-// backpropagation,
-//
-// reverse_index_map[j] = out_j s.t. indices[j, :] == output_indices[out_j, :]
+// Table initializer that takes two tensors for keys and values respectively.
//
// Arguments:
-// indices: 2-D. the indices of the sparse tensor.
-// values: 1-D. the values of the sparse tensor.
-// dense_shape: 1-D. the shape of the sparse tensor.
-// default_value: 0-D. default value to insert into location `[row, 0, ..., 0]`
-// for rows missing from the input sparse tensor.
-// output indices: 2-D. the indices of the filled sparse tensor.
+// table_handle: Handle to a table which will be initialized.
+// keys: Keys of type Tkey.
+// values: Values of type Tval.
//
-// Returns 1-D. the values of the filled sparse tensor.1-D. whether the dense row was missing in the
-// input sparse tensor.1-D. a map from the input indices to the output indices.
-func SparseFillEmptyRows(scope *Scope, indices tf.Output, values tf.Output, dense_shape tf.Output, default_value tf.Output) (output_indices tf.Output, output_values tf.Output, empty_row_indicator tf.Output, reverse_index_map tf.Output) {
+// Returns the created operation.
+func InitializeTableV2(scope *Scope, table_handle tf.Output, keys tf.Output, values tf.Output) (o *tf.Operation) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "SparseFillEmptyRows",
+ Type: "InitializeTableV2",
Input: []tf.Input{
- indices, values, dense_shape, default_value,
+ table_handle, keys, values,
},
}
- op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
+ return scope.AddOperation(opspec)
}
-// Conv2DAttr is an optional argument to Conv2D.
-type Conv2DAttr func(optionalAttr)
+// PrintAttr is an optional argument to Print.
+type PrintAttr func(optionalAttr)
-// Conv2DUseCudnnOnGpu sets the optional use_cudnn_on_gpu attribute to value.
-// If not specified, defaults to true
-func Conv2DUseCudnnOnGpu(value bool) Conv2DAttr {
+// PrintMessage sets the optional message attribute to value.
+//
+// value: A string, prefix of the printed message.
+// If not specified, defaults to ""
+func PrintMessage(value string) PrintAttr {
return func(m optionalAttr) {
- m["use_cudnn_on_gpu"] = value
+ m["message"] = value
}
}
-// Conv2DDataFormat sets the optional data_format attribute to value.
+// PrintFirstN sets the optional first_n attribute to value.
//
-// value: Specify the data format of the input and output data. With the
-// default format "NHWC", the data is stored in the order of:
-// [batch, height, width, channels].
-// Alternatively, the format could be "NCHW", the data storage order of:
-// [batch, channels, height, width].
-// If not specified, defaults to "NHWC"
-func Conv2DDataFormat(value string) Conv2DAttr {
+// value: Only log `first_n` number of times. -1 (the default) removes the limit.
+// If not specified, defaults to -1
+func PrintFirstN(value int64) PrintAttr {
return func(m optionalAttr) {
- m["data_format"] = value
+ m["first_n"] = value
}
}
-// Conv2DDilations sets the optional dilations attribute to value.
+// PrintSummarize sets the optional summarize attribute to value.
//
-// value: 1-D tensor of length 4. The dilation factor for each dimension of
-// `input`. If set to k > 1, there will be k-1 skipped cells between each
-// filter element on that dimension. The dimension order is determined by the
-// value of `data_format`, see above for details. Dilations in the batch and
-// depth dimensions must be 1.
-// If not specified, defaults to <i:1 i:1 i:1 i:1 >
-func Conv2DDilations(value []int64) Conv2DAttr {
+// value: Only print this many entries of each tensor.
+// If not specified, defaults to 3
+func PrintSummarize(value int64) PrintAttr {
return func(m optionalAttr) {
- m["dilations"] = value
+ m["summarize"] = value
}
}
-// Computes a 2-D convolution given 4-D `input` and `filter` tensors.
-//
-// Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
-// and a filter / kernel tensor of shape
-// `[filter_height, filter_width, in_channels, out_channels]`, this op
-// performs the following:
-//
-// 1. Flattens the filter to a 2-D matrix with shape
-// `[filter_height * filter_width * in_channels, output_channels]`.
-// 2. Extracts image patches from the input tensor to form a *virtual*
-// tensor of shape `[batch, out_height, out_width,
-// filter_height * filter_width * in_channels]`.
-// 3. For each patch, right-multiplies the filter matrix and the image patch
-// vector.
-//
-// In detail, with the default NHWC format,
-//
-// output[b, i, j, k] =
-// sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
-// filter[di, dj, q, k]
+// Prints a list of tensors.
//
-// Must have `strides[0] = strides[3] = 1`. For the most common case of the same
-// horizontal and vertices strides, `strides = [1, stride, stride, 1]`.
+// Passes `input` through to `output` and prints `data` when evaluating.
//
// Arguments:
-// input: A 4-D tensor. The dimension order is interpreted according to the value
-// of `data_format`, see below for details.
-// filter: A 4-D tensor of shape
-// `[filter_height, filter_width, in_channels, out_channels]`
-// strides: 1-D tensor of length 4. The stride of the sliding window for each
-// dimension of `input`. The dimension order is determined by the value of
-// `data_format`, see below for details.
-// padding: The type of padding algorithm to use.
+// input: The tensor passed to `output`
+// data: A list of tensors to print out when op is evaluated.
//
-// Returns A 4-D tensor. The dimension order is determined by the value of
-// `data_format`, see below for details.
-func Conv2D(scope *Scope, input tf.Output, filter tf.Output, strides []int64, padding string, optional ...Conv2DAttr) (output tf.Output) {
+// Returns The unmodified `input` tensor.
+func Print(scope *Scope, input tf.Output, data []tf.Output, optional ...PrintAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"strides": strides, "padding": padding}
+ attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "Conv2D",
+ Type: "Print",
Input: []tf.Input{
- input, filter,
+ input, tf.OutputList(data),
},
Attrs: attrs,
}
@@ -16464,39 +16714,44 @@ func Conv2D(scope *Scope, input tf.Output, filter tf.Output, strides []int64, pa
return op.Output(0)
}
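// A minimal usage sketch of Print, given a Scope `s` and a tensor `x` (message
// and limit invented): the op is an identity on `input`, with the tensors in
// `data` logged as a side effect.
//
// ```
// x = op.Print(s, x, []tf.Output{x},
// 	op.PrintMessage("x = "),
// 	op.PrintFirstN(10))
// ```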
-// VariableShapeAttr is an optional argument to VariableShape.
-type VariableShapeAttr func(optionalAttr)
-
-// VariableShapeOutType sets the optional out_type attribute to value.
-// If not specified, defaults to DT_INT32
-func VariableShapeOutType(value tf.DataType) VariableShapeAttr {
- return func(m optionalAttr) {
- m["out_type"] = value
+// Outputs a `Summary` protocol buffer with a tensor and per-plugin data.
+//
+// Arguments:
+// tag: A string attached to this summary. Used for organization in TensorBoard.
+// tensor: A tensor to serialize.
+// serialized_summary_metadata: A serialized SummaryMetadata proto. Contains plugin
+// data.
+func TensorSummaryV2(scope *Scope, tag tf.Output, tensor tf.Output, serialized_summary_metadata tf.Output) (summary tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "TensorSummaryV2",
+ Input: []tf.Input{
+ tag, tensor, serialized_summary_metadata,
+ },
}
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// Returns the shape of the variable pointed to by `resource`.
+// Creates a dataset that asynchronously prefetches elements from `input_dataset`.
//
-// This operation returns a 1-D integer tensor representing the shape of `input`.
+// Arguments:
//
-// For example:
+// buffer_size: The maximum number of elements to buffer in an iterator over
+// this dataset.
//
-// ```
-// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
-// shape(t) ==> [2, 2, 3]
-// ```
-func VariableShape(scope *Scope, input tf.Output, optional ...VariableShapeAttr) (output tf.Output) {
+//
+func PrefetchDataset(scope *Scope, input_dataset tf.Output, buffer_size tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
+ attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
opspec := tf.OpSpec{
- Type: "VariableShape",
+ Type: "PrefetchDataset",
Input: []tf.Input{
- input,
+ input_dataset, buffer_size,
},
Attrs: attrs,
}
@@ -16504,28 +16759,48 @@ func VariableShape(scope *Scope, input tf.Output, optional ...VariableShapeAttr)
return op.Output(0)
}
-// StringJoinAttr is an optional argument to StringJoin.
-type StringJoinAttr func(optionalAttr)
+// TensorSummaryAttr is an optional argument to TensorSummary.
+type TensorSummaryAttr func(optionalAttr)
-// StringJoinSeparator sets the optional separator attribute to value.
+// TensorSummaryDescription sets the optional description attribute to value.
//
-// value: string, an optional join separator.
+// value: A JSON-encoded SummaryDescription proto.
// If not specified, defaults to ""
-func StringJoinSeparator(value string) StringJoinAttr {
+func TensorSummaryDescription(value string) TensorSummaryAttr {
return func(m optionalAttr) {
- m["separator"] = value
+ m["description"] = value
}
}
-// Joins the strings in the given list of string tensors into one tensor;
+// TensorSummaryLabels sets the optional labels attribute to value.
//
-// with the given separator (default is an empty separator).
+// value: An unused list of strings.
+// If not specified, defaults to <>
+func TensorSummaryLabels(value []string) TensorSummaryAttr {
+ return func(m optionalAttr) {
+ m["labels"] = value
+ }
+}
+
+// TensorSummaryDisplayName sets the optional display_name attribute to value.
+//
+// value: An unused string.
+// If not specified, defaults to ""
+func TensorSummaryDisplayName(value string) TensorSummaryAttr {
+ return func(m optionalAttr) {
+ m["display_name"] = value
+ }
+}
+
+// Outputs a `Summary` protocol buffer with a tensor.
+//
+// This op is being phased out in favor of TensorSummaryV2, which lets callers pass
+// a tag as well as a serialized SummaryMetadata proto string that contains
+// plugin-specific data. We will keep this op to maintain backwards compatibility.
//
// Arguments:
-// inputs: A list of string tensors. The tensors must all have the same shape,
-// or be scalars. Scalars may be mixed in; these will be broadcast to the shape
-// of non-scalar inputs.
-func StringJoin(scope *Scope, inputs []tf.Output, optional ...StringJoinAttr) (output tf.Output) {
+// tensor: A tensor to serialize.
+func TensorSummary(scope *Scope, tensor tf.Output, optional ...TensorSummaryAttr) (summary tf.Output) {
if scope.Err() != nil {
return
}
@@ -16534,9 +16809,9 @@ func StringJoin(scope *Scope, inputs []tf.Output, optional ...StringJoinAttr) (o
a(attrs)
}
opspec := tf.OpSpec{
- Type: "StringJoin",
+ Type: "TensorSummary",
Input: []tf.Input{
- tf.OutputList(inputs),
+ tensor,
},
Attrs: attrs,
}
@@ -16544,201 +16819,163 @@ func StringJoin(scope *Scope, inputs []tf.Output, optional ...StringJoinAttr) (o
return op.Output(0)
}
-// Transforms a vector of brain.Example protos (as strings) into typed tensors.
+// Computes the gradient for the tanh of `x` wrt its input.
//
-// Arguments:
-// serialized: A vector containing a batch of binary serialized Example protos.
-// names: A vector containing the names of the serialized protos.
-// May contain, for example, table key (descriptive) names for the
-// corresponding serialized protos. These are purely useful for debugging
-// purposes, and the presence of values here has no effect on the output.
-// May also be an empty vector if no names are available.
-// If non-empty, this vector must be the same length as "serialized".
-// sparse_keys: A list of Nsparse string Tensors (scalars).
-// The keys expected in the Examples' features associated with sparse values.
-// dense_keys: A list of Ndense string Tensors (scalars).
-// The keys expected in the Examples' features associated with dense values.
-// dense_defaults: A list of Ndense Tensors (some may be empty).
-// dense_defaults[j] provides default values
-// when the example's feature_map lacks dense_key[j]. If an empty Tensor is
-// provided for dense_defaults[j], then the Feature dense_keys[j] is required.
-// The input type is inferred from dense_defaults[j], even when it's empty.
-// If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined,
-// then the shape of dense_defaults[j] must match that of dense_shapes[j].
-// If dense_shapes[j] has an undefined major dimension (variable strides dense
-// feature), dense_defaults[j] must contain a single element:
-// the padding element.
-// sparse_types: A list of Nsparse types; the data types of data in each Feature
-// given in sparse_keys.
-// Currently the ParseExample supports DT_FLOAT (FloatList),
-// DT_INT64 (Int64List), and DT_STRING (BytesList).
-// dense_shapes: A list of Ndense shapes; the shapes of data in each Feature
-// given in dense_keys.
-// The number of elements in the Feature corresponding to dense_key[j]
-// must always equal dense_shapes[j].NumEntries().
-// If dense_shapes[j] == (D0, D1, ..., DN) then the shape of output
-// Tensor dense_values[j] will be (|serialized|, D0, D1, ..., DN):
-// The dense outputs are just the inputs row-stacked by batch.
-// This works for dense_shapes[j] = (-1, D1, ..., DN). In this case
-// the shape of the output Tensor dense_values[j] will be
-// (|serialized|, M, D1, .., DN), where M is the maximum number of blocks
-// of elements of length D1 * .... * DN, across all minibatch entries
-// in the input. Any minibatch entry with less than M blocks of elements of
-// length D1 * ... * DN will be padded with the corresponding default_value
-// scalar element along the second dimension.
-func ParseExample(scope *Scope, serialized tf.Output, names tf.Output, sparse_keys []tf.Output, dense_keys []tf.Output, dense_defaults []tf.Output, sparse_types []tf.DataType, dense_shapes []tf.Shape) (sparse_indices []tf.Output, sparse_values []tf.Output, sparse_shapes []tf.Output, dense_values []tf.Output) {
+// Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and `dy`
+// is the corresponding input gradient.
+func TanhGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"sparse_types": sparse_types, "dense_shapes": dense_shapes}
opspec := tf.OpSpec{
- Type: "ParseExample",
+ Type: "TanhGrad",
Input: []tf.Input{
- serialized, names, tf.OutputList(sparse_keys), tf.OutputList(dense_keys), tf.OutputList(dense_defaults),
+ y, dy,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
- if scope.Err() != nil {
- return
- }
- var idx int
- var err error
- if sparse_indices, idx, err = makeOutputList(op, idx, "sparse_indices"); err != nil {
- scope.UpdateErr("ParseExample", err)
- return
- }
- if sparse_values, idx, err = makeOutputList(op, idx, "sparse_values"); err != nil {
- scope.UpdateErr("ParseExample", err)
- return
- }
- if sparse_shapes, idx, err = makeOutputList(op, idx, "sparse_shapes"); err != nil {
- scope.UpdateErr("ParseExample", err)
- return
- }
- if dense_values, idx, err = makeOutputList(op, idx, "dense_values"); err != nil {
- scope.UpdateErr("ParseExample", err)
- return
- }
- return sparse_indices, sparse_values, sparse_shapes, dense_values
+ return op.Output(0)
}
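+
+// A minimal sketch of wiring TanhGrad into a graph, assuming a Scope `s`
+// from NewScope() and that `y` holds tanh(x) computed elsewhere; the names
+// are illustrative:
+//
+// ```go
+// y := Placeholder(s.SubScope("y"), tf.Float)   // y = tanh(x)
+// dy := Placeholder(s.SubScope("dy"), tf.Float) // incoming gradient
+// z := TanhGrad(s, y, dy)                       // z = dy * (1 - y*y)
+// ```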
-// Compute the pairwise cross product.
+// Outputs a `Summary` protocol buffer with scalar values.
//
-// `a` and `b` must be the same shape; they can either be simple 3-element vectors,
-// or any shape where the innermost dimension is 3. In the latter case, each pair
-// of corresponding 3-element vectors is cross-multiplied independently.
+// The input `tags` and `values` must have the same shape. The generated summary
+// has a summary value for each tag-value pair in `tags` and `values`.
//
// Arguments:
-// a: A tensor containing 3-element vectors.
-// b: Another tensor, of same type and shape as `a`.
+// tags: Tags for the summary.
+// values: Same shape as `tags`. Values for the summary.
//
-// Returns Pairwise cross product of the vectors in `a` and `b`.
-func Cross(scope *Scope, a tf.Output, b tf.Output) (product tf.Output) {
+// Returns Scalar. Serialized `Summary` protocol buffer.
+func ScalarSummary(scope *Scope, tags tf.Output, values tf.Output) (summary tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Cross",
+ Type: "ScalarSummary",
Input: []tf.Input{
- a, b,
+ tags, values,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
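+
+// A minimal sketch, assuming a Scope `s`; the tag and value tensors are
+// illustrative:
+//
+// ```go
+// tags := Const(s.SubScope("tags"), []string{"loss", "accuracy"})
+// vals := Const(s.SubScope("vals"), []float32{0.31, 0.95})
+// summary := ScalarSummary(s, tags, vals)
+// ```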
-// Inverse 2D real-valued fast Fourier transform.
-//
-// Computes the inverse 2-dimensional discrete Fourier transform of a real-valued
-// signal over the inner-most 2 dimensions of `input`.
+// Outputs a `Summary` protocol buffer with a histogram.
//
-// The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`:
-// The inner-most dimension contains the `fft_length / 2 + 1` unique components of
-// the DFT of a real-valued signal. If `fft_length` is not provided, it is computed
-// from the size of the inner-most 2 dimensions of `input`. If the FFT length used
-// to compute `input` is odd, it should be provided since it cannot be inferred
-// properly.
+// The generated
+// [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
+// has one summary value containing a histogram for `values`.
//
-// Along each axis `IRFFT2D` is computed on, if `fft_length` (or
-// `fft_length / 2 + 1` for the inner-most dimension) is smaller than the
-// corresponding dimension of `input`, the dimension is cropped. If it is larger,
-// the dimension is padded with zeros.
+// This op reports an `InvalidArgument` error if any value is not finite.
//
// Arguments:
-// input: A complex64 tensor.
-// fft_length: An int32 tensor of shape [2]. The FFT length for each dimension.
-//
-// Returns A float32 tensor of the same rank as `input`. The inner-most 2
-// dimensions of `input` are replaced with the `fft_length` samples of their
-// inverse 2D Fourier transform.
+// tag: Scalar. Tag to use for the `Summary.Value`.
+// values: Any shape. Values to use to build the histogram.
//
-// @compatibility(numpy)
-// Equivalent to np.fft.irfft2
-// @end_compatibility
-func IRFFT2D(scope *Scope, input tf.Output, fft_length tf.Output) (output tf.Output) {
+// Returns Scalar. Serialized `Summary` protocol buffer.
+func HistogramSummary(scope *Scope, tag tf.Output, values tf.Output) (summary tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "IRFFT2D",
+ Type: "HistogramSummary",
Input: []tf.Input{
- input, fft_length,
+ tag, values,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Returns element-wise remainder of division. This emulates C semantics in that
+// Computes the number of elements in the given queue.
//
-// the result here is consistent with a truncating divide. E.g. `truncate(x / y) *
-// y + truncate_mod(x, y) = x`.
+// Arguments:
+// handle: The handle to a queue.
//
-// *NOTE*: `TruncateMod` supports broadcasting. More about broadcasting
-// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-func TruncateMod(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
+// Returns The number of elements in the given queue.
+func QueueSizeV2(scope *Scope, handle tf.Output) (size tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "TruncateMod",
+ Type: "QueueSizeV2",
Input: []tf.Input{
- x, y,
+ handle,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// ResourceApplyAdagradAttr is an optional argument to ResourceApplyAdagrad.
-type ResourceApplyAdagradAttr func(optionalAttr)
+// ImageSummaryAttr is an optional argument to ImageSummary.
+type ImageSummaryAttr func(optionalAttr)
-// ResourceApplyAdagradUseLocking sets the optional use_locking attribute to value.
+// ImageSummaryMaxImages sets the optional max_images attribute to value.
//
-// value: If `True`, updating of the var and accum tensors will be protected
-// by a lock; otherwise the behavior is undefined, but may exhibit less
-// contention.
-// If not specified, defaults to false
-func ResourceApplyAdagradUseLocking(value bool) ResourceApplyAdagradAttr {
+// value: Max number of batch elements to generate images for.
+// If not specified, defaults to 3
+//
+// REQUIRES: value >= 1
+func ImageSummaryMaxImages(value int64) ImageSummaryAttr {
return func(m optionalAttr) {
- m["use_locking"] = value
+ m["max_images"] = value
}
}
-// Update '*var' according to the adagrad scheme.
+// ImageSummaryBadColor sets the optional bad_color attribute to value.
//
-// accum += grad * grad
-// var -= lr * grad * (1 / sqrt(accum))
+// value: Color to use for pixels with non-finite values.
+// If not specified, defaults to <dtype:DT_UINT8 tensor_shape:<dim:<size:4 > > int_val:255 int_val:0 int_val:0 int_val:255 >
+func ImageSummaryBadColor(value tf.Tensor) ImageSummaryAttr {
+ return func(m optionalAttr) {
+ m["bad_color"] = value
+ }
+}
+
+// Outputs a `Summary` protocol buffer with images.
+//
+// The summary has up to `max_images` summary values containing images. The
+// images are built from `tensor` which must be 4-D with shape `[batch_size,
+// height, width, channels]` and where `channels` can be:
+//
+// * 1: `tensor` is interpreted as Grayscale.
+// * 3: `tensor` is interpreted as RGB.
+// * 4: `tensor` is interpreted as RGBA.
+//
+// The images have the same number of channels as the input tensor. For float
+// input, the values are normalized one image at a time to fit in the range
+// `[0, 255]`. `uint8` values are unchanged. The op uses two different
+// normalization algorithms:
+//
+// * If the input values are all positive, they are rescaled so the largest one
+// is 255.
+//
+// * If any input value is negative, the values are shifted so input value 0.0
+// is at 127. They are then rescaled so that either the smallest value is 0,
+// or the largest one is 255.
+//
+// The `tag` argument is a scalar `Tensor` of type `string`. It is used to
+// build the `tag` of the summary values:
+//
+// * If `max_images` is 1, the summary value tag is '*tag*/image'.
+// * If `max_images` is greater than 1, the summary value tags are
+// generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.
+//
+// The `bad_color` argument is the color to use in the generated images for
+// non-finite input values. It is a `uint8` 1-D tensor of length `channels`.
+// Each element must be in the range `[0, 255]` (it represents the value of a
+// pixel in the output image). Non-finite values in the input tensor are
+// replaced by this tensor in the output image. The default value is the color
+// red.
//
// Arguments:
-// var_: Should be from a Variable().
-// accum: Should be from a Variable().
-// lr: Scaling factor. Must be a scalar.
-// grad: The gradient.
+// tag: Scalar. Used to build the `tag` attribute of the summary values.
+// tensor: 4-D of shape `[batch_size, height, width, channels]` where
+// `channels` is 1, 3, or 4.
//
-// Returns the created operation.
-func ResourceApplyAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, optional ...ResourceApplyAdagradAttr) (o *tf.Operation) {
+// Returns Scalar. Serialized `Summary` protocol buffer.
+func ImageSummary(scope *Scope, tag tf.Output, tensor tf.Output, optional ...ImageSummaryAttr) (summary tf.Output) {
if scope.Err() != nil {
return
}
@@ -16747,52 +16984,52 @@ func ResourceApplyAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.O
a(attrs)
}
opspec := tf.OpSpec{
- Type: "ResourceApplyAdagrad",
+ Type: "ImageSummary",
Input: []tf.Input{
- var_, accum, lr, grad,
+ tag, tensor,
},
Attrs: attrs,
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// SparseReduceSumAttr is an optional argument to SparseReduceSum.
-type SparseReduceSumAttr func(optionalAttr)
+// AudioSummaryV2Attr is an optional argument to AudioSummaryV2.
+type AudioSummaryV2Attr func(optionalAttr)
-// SparseReduceSumKeepDims sets the optional keep_dims attribute to value.
+// AudioSummaryV2MaxOutputs sets the optional max_outputs attribute to value.
//
-// value: If true, retain reduced dimensions with length 1.
-// If not specified, defaults to false
-func SparseReduceSumKeepDims(value bool) SparseReduceSumAttr {
+// value: Max number of batch elements to generate audio for.
+// If not specified, defaults to 3
+//
+// REQUIRES: value >= 1
+func AudioSummaryV2MaxOutputs(value int64) AudioSummaryV2Attr {
return func(m optionalAttr) {
- m["keep_dims"] = value
+ m["max_outputs"] = value
}
}
-// Computes the sum of elements across dimensions of a SparseTensor.
+// Outputs a `Summary` protocol buffer with audio.
//
-// This Op takes a SparseTensor and is the sparse counterpart to
-// `tf.reduce_sum()`. In particular, this Op also returns a dense `Tensor`
-// instead of a sparse one.
+// The summary has up to `max_outputs` summary values containing audio. The
+// audio is built from `tensor` which must be 3-D with shape `[batch_size,
+// frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
+// assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.
//
-// Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
-// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
-// `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
-// with length 1.
+// The `tag` argument is a scalar `Tensor` of type `string`. It is used to
+// build the `tag` of the summary values:
//
-// If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
-// with a single element is returned. Additionally, the axes can be negative,
-// which are interpreted according to the indexing rules in Python.
+// * If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
+// * If `max_outputs` is greater than 1, the summary value tags are
+// generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
//
// Arguments:
-// input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a
-// SparseTensor, possibly not in canonical ordering.
-// input_values: 1-D. `N` non-empty values corresponding to `input_indices`.
-// input_shape: 1-D. Shape of the input SparseTensor.
-// reduction_axes: 1-D. Length-`K` vector containing the reduction axes.
+// tag: Scalar. Used to build the `tag` attribute of the summary values.
+// tensor: 2-D of shape `[batch_size, frames]`.
+// sample_rate: The sample rate of the signal in hertz.
//
-// Returns `R-K`-D. The reduced Tensor.
-func SparseReduceSum(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output, reduction_axes tf.Output, optional ...SparseReduceSumAttr) (output tf.Output) {
+// Returns Scalar. Serialized `Summary` protocol buffer.
+func AudioSummaryV2(scope *Scope, tag tf.Output, tensor tf.Output, sample_rate tf.Output, optional ...AudioSummaryV2Attr) (summary tf.Output) {
if scope.Err() != nil {
return
}
@@ -16801,9 +17038,9 @@ func SparseReduceSum(scope *Scope, input_indices tf.Output, input_values tf.Outp
a(attrs)
}
opspec := tf.OpSpec{
- Type: "SparseReduceSum",
+ Type: "AudioSummaryV2",
Input: []tf.Input{
- input_indices, input_values, input_shape, reduction_axes,
+ tag, tensor, sample_rate,
},
Attrs: attrs,
}
@@ -16811,35 +17048,36 @@ func SparseReduceSum(scope *Scope, input_indices tf.Output, input_values tf.Outp
return op.Output(0)
}
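+
+// A minimal sketch, assuming a Scope `s` and a `[batch_size, frames]`
+// float32 tensor `audio`:
+//
+// ```go
+// tag := Const(s.SubScope("tag"), "waveform")
+// rate := Const(s.SubScope("rate"), float32(44100))
+// summary := AudioSummaryV2(s, tag, audio, rate, AudioSummaryV2MaxOutputs(2))
+// ```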
-// MaxPool3DGradAttr is an optional argument to MaxPool3DGrad.
-type MaxPool3DGradAttr func(optionalAttr)
+// AvgPoolAttr is an optional argument to AvgPool.
+type AvgPoolAttr func(optionalAttr)
-// MaxPool3DGradDataFormat sets the optional data_format attribute to value.
+// AvgPoolDataFormat sets the optional data_format attribute to value.
//
-// value: The data format of the input and output data. With the
-// default format "NDHWC", the data is stored in the order of:
-// [batch, in_depth, in_height, in_width, in_channels].
-// Alternatively, the format could be "NCDHW", the data storage order is:
-// [batch, in_channels, in_depth, in_height, in_width].
-// If not specified, defaults to "NDHWC"
-func MaxPool3DGradDataFormat(value string) MaxPool3DGradAttr {
+// value: Specify the data format of the input and output data. With the
+// default format "NHWC", the data is stored in the order of:
+// [batch, in_height, in_width, in_channels].
+// Alternatively, the format could be "NCHW", the data storage order of:
+// [batch, in_channels, in_height, in_width].
+// If not specified, defaults to "NHWC"
+func AvgPoolDataFormat(value string) AvgPoolAttr {
return func(m optionalAttr) {
m["data_format"] = value
}
}
-// Computes gradients of max pooling function.
+// Performs average pooling on the input.
+//
+// Each entry in `output` is the mean of the corresponding size `ksize`
+// window in `value`.
//
// Arguments:
-// orig_input: The original input tensor.
-// orig_output: The original output tensor.
-// grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.
-// ksize: 1-D tensor of length 5. The size of the window for each dimension of
-// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
-// strides: 1-D tensor of length 5. The stride of the sliding window for each
-// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
+// value: 4-D with shape `[batch, height, width, channels]`.
+// ksize: The size of the sliding window for each dimension of `value`.
+// strides: The stride of the sliding window for each dimension of `value`.
// padding: The type of padding algorithm to use.
-func MaxPool3DGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPool3DGradAttr) (output tf.Output) {
+//
+// Returns The average pooled output tensor.
+func AvgPool(scope *Scope, value tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPoolAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
@@ -16848,9 +17086,9 @@ func MaxPool3DGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, gr
a(attrs)
}
opspec := tf.OpSpec{
- Type: "MaxPool3DGrad",
+ Type: "AvgPool",
Input: []tf.Input{
- orig_input, orig_output, grad,
+ value,
},
Attrs: attrs,
}
@@ -16858,195 +17096,216 @@ func MaxPool3DGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, gr
return op.Output(0)
}
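+
+// A minimal sketch of 2x2 average pooling with stride 2 in the default
+// "NHWC" layout, assuming a Scope `s` and a 4-D float tensor `images`:
+//
+// ```go
+// pooled := AvgPool(s, images,
+//     []int64{1, 2, 2, 1}, // ksize: pool over height and width only
+//     []int64{1, 2, 2, 1}, // strides
+//     "VALID")
+// ```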
-// Computes the Gauss error function of `x` element-wise.
-func Erf(scope *Scope, x tf.Output) (y tf.Output) {
+// Merges summaries.
+//
+// This op creates a
+// [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
+// protocol buffer that contains the union of all the values in the input
+// summaries.
+//
+// When the Op is run, it reports an `InvalidArgument` error if multiple values
+// in the summaries to merge use the same tag.
+//
+// Arguments:
+// inputs: Can be of any shape. Each must contain serialized `Summary` protocol
+// buffers.
+//
+// Returns Scalar. Serialized `Summary` protocol buffer.
+func MergeSummary(scope *Scope, inputs []tf.Output) (summary tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Erf",
+ Type: "MergeSummary",
Input: []tf.Input{
- x,
+ tf.OutputList(inputs),
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
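+
+// A minimal sketch, assuming a Scope `s` and that `scalarSum` and `histSum`
+// are `tf.Output`s produced by ScalarSummary and HistogramSummary above:
+//
+// ```go
+// merged := MergeSummary(s, []tf.Output{scalarSum, histSum})
+// ```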
-// Returns element-wise largest integer not greater than x.
-func Floor(scope *Scope, x tf.Output) (y tf.Output) {
+// Computes the gradient of morphological 2-D dilation with respect to the filter.
+//
+// Arguments:
+// input: 4-D with shape `[batch, in_height, in_width, depth]`.
+// filter: 3-D with shape `[filter_height, filter_width, depth]`.
+// out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`.
+// strides: 1-D of length 4. The stride of the sliding window for each dimension of
+// the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
+// rates: 1-D of length 4. The input stride for atrous morphological dilation.
+// Must be: `[1, rate_height, rate_width, 1]`.
+// padding: The type of padding algorithm to use.
+//
+// Returns 3-D with shape `[filter_height, filter_width, depth]`.
+func Dilation2DBackpropFilter(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, rates []int64, padding string) (filter_backprop tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"strides": strides, "rates": rates, "padding": padding}
opspec := tf.OpSpec{
- Type: "Floor",
+ Type: "Dilation2DBackpropFilter",
Input: []tf.Input{
- x,
+ input, filter, out_backprop,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// ThreadUnsafeUnigramCandidateSamplerAttr is an optional argument to ThreadUnsafeUnigramCandidateSampler.
-type ThreadUnsafeUnigramCandidateSamplerAttr func(optionalAttr)
+// AddSparseToTensorsMapAttr is an optional argument to AddSparseToTensorsMap.
+type AddSparseToTensorsMapAttr func(optionalAttr)
-// ThreadUnsafeUnigramCandidateSamplerSeed sets the optional seed attribute to value.
+// AddSparseToTensorsMapContainer sets the optional container attribute to value.
//
-// value: If either seed or seed2 are set to be non-zero, the random number
-// generator is seeded by the given seed. Otherwise, it is seeded by a
-// random seed.
-// If not specified, defaults to 0
-func ThreadUnsafeUnigramCandidateSamplerSeed(value int64) ThreadUnsafeUnigramCandidateSamplerAttr {
+// value: The container name for the `SparseTensorsMap` created by this op.
+// If not specified, defaults to ""
+func AddSparseToTensorsMapContainer(value string) AddSparseToTensorsMapAttr {
return func(m optionalAttr) {
- m["seed"] = value
+ m["container"] = value
}
}
-// ThreadUnsafeUnigramCandidateSamplerSeed2 sets the optional seed2 attribute to value.
+// AddSparseToTensorsMapSharedName sets the optional shared_name attribute to value.
//
-// value: An second seed to avoid seed collision.
-// If not specified, defaults to 0
-func ThreadUnsafeUnigramCandidateSamplerSeed2(value int64) ThreadUnsafeUnigramCandidateSamplerAttr {
+// value: The shared name for the `SparseTensorsMap` created by this op.
+// If blank, the new Operation's unique name is used.
+// If not specified, defaults to ""
+func AddSparseToTensorsMapSharedName(value string) AddSparseToTensorsMapAttr {
return func(m optionalAttr) {
- m["seed2"] = value
+ m["shared_name"] = value
}
}
-// Generates labels for candidate sampling with a learned unigram distribution.
+// Add a `SparseTensor` to a `SparseTensorsMap` and return its handle.
//
-// See explanations of candidate sampling and the data formats at
-// go/candidate-sampling.
+// A `SparseTensor` is represented by three tensors: `sparse_indices`,
+// `sparse_values`, and `sparse_shape`.
//
-// For each batch, this op picks a single set of sampled candidate labels.
+// This operator takes the given `SparseTensor` and adds it to a container
+// object (a `SparseTensorsMap`). A unique key within this container is generated
+// in the form of an `int64`, and this is the value that is returned.
//
-// The advantages of sampling candidates per-batch are simplicity and the
-// possibility of efficient dense matrix multiplication. The disadvantage is that
-// the sampled candidates must be chosen independently of the context and of the
-// true labels.
+// The `SparseTensor` can then be read out as part of a minibatch by passing
+// the key as a vector element to `TakeManySparseFromTensorsMap`. To ensure
+// the correct `SparseTensorsMap` is accessed, ensure that the same
+// `container` and `shared_name` are passed to that Op. If no `shared_name`
+// is provided here, instead use the *name* of the Operation created by calling
+// `AddSparseToTensorsMap` as the `shared_name` passed to
+// `TakeManySparseFromTensorsMap`. Ensure the Operations are colocated.
//
// Arguments:
-// true_classes: A batch_size * num_true matrix, in which each row contains the
-// IDs of the num_true target_classes in the corresponding original label.
-// num_true: Number of true labels per context.
-// num_sampled: Number of candidates to randomly sample.
-// unique: If unique is true, we sample with rejection, so that all sampled
-// candidates in a batch are unique. This requires some approximation to
-// estimate the post-rejection sampling probabilities.
-// range_max: The sampler will sample integers from the interval [0, range_max).
+// sparse_indices: 2-D. The `indices` of the `SparseTensor`.
+// sparse_values: 1-D. The `values` of the `SparseTensor`.
+// sparse_shape: 1-D. The `shape` of the `SparseTensor`.
//
-// Returns A vector of length num_sampled, in which each element is
-// the ID of a sampled candidate.A batch_size * num_true matrix, representing
-// the number of times each candidate is expected to occur in a batch
-// of sampled candidates. If unique=true, then this is a probability.A vector of length num_sampled, for each sampled
-// candidate representing the number of times the candidate is expected
-// to occur in a batch of sampled candidates. If unique=true, then this is a
-// probability.
-func ThreadUnsafeUnigramCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...ThreadUnsafeUnigramCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
+// Returns 0-D. The handle of the `SparseTensor` now stored in the
+// `SparseTensorsMap`.
+func AddSparseToTensorsMap(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output, optional ...AddSparseToTensorsMapAttr) (sparse_handle tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique, "range_max": range_max}
+ attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "ThreadUnsafeUnigramCandidateSampler",
+ Type: "AddSparseToTensorsMap",
Input: []tf.Input{
- true_classes,
+ sparse_indices, sparse_values, sparse_shape,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
-}
-
-// ResourceSparseApplyProximalAdagradAttr is an optional argument to ResourceSparseApplyProximalAdagrad.
-type ResourceSparseApplyProximalAdagradAttr func(optionalAttr)
-
-// ResourceSparseApplyProximalAdagradUseLocking sets the optional use_locking attribute to value.
-//
-// value: If True, updating of the var and accum tensors will be protected by
-// a lock; otherwise the behavior is undefined, but may exhibit less contention.
-// If not specified, defaults to false
-func ResourceSparseApplyProximalAdagradUseLocking(value bool) ResourceSparseApplyProximalAdagradAttr {
- return func(m optionalAttr) {
- m["use_locking"] = value
- }
+ return op.Output(0)
}
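+
+// A minimal sketch storing a 3x4 SparseTensor with a single nonzero entry,
+// assuming a Scope `s`; the same `shared_name` would then be passed to
+// TakeManySparseFromTensorsMap on the read side:
+//
+// ```go
+// indices := Const(s.SubScope("indices"), [][]int64{{0, 1}})
+// values := Const(s.SubScope("values"), []float32{3.5})
+// shape := Const(s.SubScope("shape"), []int64{3, 4})
+// handle := AddSparseToTensorsMap(s, indices, values, shape,
+//     AddSparseToTensorsMapSharedName("shared_sparse"))
+// ```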
-// Sparse update entries in '*var' and '*accum' according to FOBOS algorithm.
+// Writes a `Summary` protocol buffer with scalar values.
//
-// That is for rows we have grad for, we update var and accum as follows:
-// accum += grad * grad
-// prox_v = var
-// prox_v -= lr * grad * (1 / sqrt(accum))
-// var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
+// The input `tag` and `value` must be scalars.
//
// Arguments:
-// var_: Should be from a Variable().
-// accum: Should be from a Variable().
-// lr: Learning rate. Must be a scalar.
-// l1: L1 regularization. Must be a scalar.
-// l2: L2 regularization. Must be a scalar.
-// grad: The gradient.
-// indices: A vector of indices into the first dimension of var and accum.
+// writer: A handle to a summary writer.
+// step: The step to write the summary for.
+// tag: Tag for the summary.
+// value: Value for the summary.
//
// Returns the created operation.
-func ResourceSparseApplyProximalAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyProximalAdagradAttr) (o *tf.Operation) {
+func WriteScalarSummary(scope *Scope, writer tf.Output, step tf.Output, tag tf.Output, value tf.Output) (o *tf.Operation) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "ResourceSparseApplyProximalAdagrad",
+ Type: "WriteScalarSummary",
Input: []tf.Input{
- var_, accum, lr, l1, l2, grad, indices,
+ writer, step, tag, value,
},
- Attrs: attrs,
}
return scope.AddOperation(opspec)
}
-// Store the input tensor in the state of the current session.
+// Computes the matrix exponential of one or more square matrices:
+//
+// exp(A) = \sum_{n=0}^\infty A^n/n!
+//
+// The exponential is computed using a combination of the scaling and squaring
+// method and the Padé approximation. Details can be found in:
+// Nicholas J. Higham, "The scaling and squaring method for the matrix exponential
+// revisited," SIAM J. Matrix Anal. Applic., 26:1179-1193, 2005.
+//
+// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
+// form square matrices. The output is a tensor of the same shape as the input
+// containing the exponential for all input submatrices `[..., :, :]`.
//
// Arguments:
-// value: The tensor to be stored.
+// input: Shape is `[..., M, M]`.
//
-// Returns The handle for the tensor stored in the session state, represented
-// as a string.
-func GetSessionHandle(scope *Scope, value tf.Output) (handle tf.Output) {
+// Returns Shape is `[..., M, M]`.
+//
+// @compatibility(scipy)
+// Equivalent to scipy.linalg.expm
+// @end_compatibility
+func MatrixExponential(scope *Scope, input tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "GetSessionHandle",
+ Type: "MatrixExponential",
Input: []tf.Input{
- value,
+ input,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Decode web-safe base64-encoded strings.
+// Computes the Cholesky decomposition of one or more square matrices.
//
-// Input may or may not have padding at the end. See EncodeBase64 for padding.
-// Web-safe means that input must use - and _ instead of + and /.
+// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
+// form square matrices.
+//
+// The input has to be symmetric and positive definite. Only the lower-triangular
+// part of the input will be used for this operation. The upper-triangular part
+// will not be read.
+//
+// The output is a tensor of the same shape as the input
+// containing the Cholesky decompositions for all input submatrices `[..., :, :]`.
+//
+// **Note**: The gradient computation on GPU is faster for large matrices but
+// not for large batch dimensions when the submatrices are small. In this
+// case it might be faster to use the CPU.
//
// Arguments:
-// input: Base64 strings to decode.
+// input: Shape is `[..., M, M]`.
//
-// Returns Decoded strings.
-func DecodeBase64(scope *Scope, input tf.Output) (output tf.Output) {
+// Returns Shape is `[..., M, M]`.
+func Cholesky(scope *Scope, input tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "DecodeBase64",
+ Type: "Cholesky",
Input: []tf.Input{
input,
},
@@ -17055,230 +17314,172 @@ func DecodeBase64(scope *Scope, input tf.Output) (output tf.Output) {
return op.Output(0)
}
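+
+// A minimal sketch factoring one 2x2 symmetric positive-definite matrix,
+// assuming a Scope `s`:
+//
+// ```go
+// a := Const(s, [][]float32{{4, 2}, {2, 3}})
+// l := Cholesky(s, a) // a = l * transpose(l), with l lower-triangular
+// ```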
-// Computes hyperbolic tangent of `x` element-wise.
-func Tanh(scope *Scope, x tf.Output) (y tf.Output) {
+// Writes contents to the file at input filename. Creates the file and
+//
+// recursively creates the directory if it does not exist.
+//
+// Arguments:
+// filename: scalar. The name of the file to which we write the contents.
+// contents: scalar. The content to be written to the output file.
+//
+// Returns the created operation.
+func WriteFile(scope *Scope, filename tf.Output, contents tf.Output) (o *tf.Operation) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Tanh",
+ Type: "WriteFile",
Input: []tf.Input{
- x,
+ filename, contents,
},
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
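+
+// A minimal sketch, assuming a Scope `s`; the path is illustrative. Note
+// that the op returns a `*tf.Operation` to be used as a session target
+// rather than fetched:
+//
+// ```go
+// filename := Const(s.SubScope("filename"), "/tmp/out.txt")
+// contents := Const(s.SubScope("contents"), "hello")
+// writeOp := WriteFile(s, filename, contents)
+// ```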
-// Restores tensors from a V2 checkpoint.
-//
-// For backward compatibility with the V1 format, this Op currently allows
-// restoring from a V1 checkpoint as well:
-// - This Op first attempts to find the V2 index file pointed to by "prefix", and
-// if found proceed to read it as a V2 checkpoint;
-// - Otherwise the V1 read path is invoked.
-// Relying on this behavior is not recommended, as the ability to fall back to read
-// V1 might be deprecated and eventually removed.
+// AllAttr is an optional argument to All.
+type AllAttr func(optionalAttr)
+
+// AllKeepDims sets the optional keep_dims attribute to value.
//
-// By default, restores the named tensors in full. If the caller wishes to restore
-// specific slices of stored tensors, "shape_and_slices" should be non-empty
-// strings and correspondingly well-formed.
+// value: If true, retain reduced dimensions with length 1.
+// If not specified, defaults to false
+func AllKeepDims(value bool) AllAttr {
+ return func(m optionalAttr) {
+ m["keep_dims"] = value
+ }
+}
+
+// Computes the "logical and" of elements across dimensions of a tensor.
//
-// Callers must ensure all the named tensors are indeed stored in the checkpoint.
+// Reduces `input` along the dimensions given in `axis`. Unless
+// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+// `axis`. If `keep_dims` is true, the reduced dimensions are
+// retained with length 1.
//
// Arguments:
-// prefix: Must have a single element. The prefix of a V2 checkpoint.
-// tensor_names: shape {N}. The names of the tensors to be restored.
-// shape_and_slices: shape {N}. The slice specs of the tensors to be restored.
-// Empty strings indicate that they are non-partitioned tensors.
-// dtypes: shape {N}. The list of expected dtype for the tensors. Must match
-// those stored in the checkpoint.
+// input: The tensor to reduce.
+// axis: The dimensions to reduce. Must be in the range
+// `[-rank(input), rank(input))`.
//
-// Returns shape {N}. The restored tensors, whose shapes are read from the
-// checkpoint directly.
-func RestoreV2(scope *Scope, prefix tf.Output, tensor_names tf.Output, shape_and_slices tf.Output, dtypes []tf.DataType) (tensors []tf.Output) {
+// Returns The reduced tensor.
+func All(scope *Scope, input tf.Output, axis tf.Output, optional ...AllAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dtypes": dtypes}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "RestoreV2",
+ Type: "All",
Input: []tf.Input{
- prefix, tensor_names, shape_and_slices,
+ input, axis,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- if scope.Err() != nil {
- return
- }
- var idx int
- var err error
- if tensors, idx, err = makeOutputList(op, idx, "tensors"); err != nil {
- scope.UpdateErr("RestoreV2", err)
- return
- }
- return tensors
+ return op.Output(0)
}
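+
+// A minimal sketch reducing a boolean matrix along its columns while
+// keeping the reduced dimension, assuming a Scope `s`:
+//
+// ```go
+// x := Const(s.SubScope("x"), [][]bool{{true, true}, {true, false}})
+// axis := Const(s.SubScope("axis"), int32(1))
+// out := All(s, x, axis, AllKeepDims(true)) // shape [2, 1]: [[true], [false]]
+// ```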
-// Returns x / y element-wise for integer types.
+// Computes the Eigen Decomposition of a batch of square self-adjoint matrices.
//
-// Truncation designates that negative numbers will round fractional quantities
-// toward zero. I.e. -7 / 5 = -1. This matches C semantics but it is different
-// than Python semantics. See `FloorDiv` for a division function that matches
-// Python Semantics.
+// DEPRECATED at GraphDef version 11: Use SelfAdjointEigV2 instead.
//
-// *NOTE*: `TruncateDiv` supports broadcasting. More about broadcasting
-// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-func TruncateDiv(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
+// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
+// form square matrices, with the same constraints as the single matrix
+// SelfAdjointEig.
+//
+// The result is a [..., M+1, M] matrix with [..., 0, :] containing the
+// eigenvalues, and subsequent [..., 1:, :] containing the eigenvectors.
+//
+// Arguments:
+// input: Shape is `[..., M, M]`.
+//
+// Returns Shape is `[..., M+1, M]`.
+func SelfAdjointEig(scope *Scope, input tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "TruncateDiv",
+ Type: "SelfAdjointEig",
Input: []tf.Input{
- x, y,
+ input,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// SampleDistortedBoundingBoxAttr is an optional argument to SampleDistortedBoundingBox.
-type SampleDistortedBoundingBoxAttr func(optionalAttr)
-
-// SampleDistortedBoundingBoxSeed sets the optional seed attribute to value.
+// Computes softplus gradients for a softplus operation.
//
-// value: If either `seed` or `seed2` are set to non-zero, the random number
-// generator is seeded by the given `seed`. Otherwise, it is seeded by a random
-// seed.
-// If not specified, defaults to 0
-func SampleDistortedBoundingBoxSeed(value int64) SampleDistortedBoundingBoxAttr {
- return func(m optionalAttr) {
- m["seed"] = value
- }
-}
-
-// SampleDistortedBoundingBoxSeed2 sets the optional seed2 attribute to value.
+// Arguments:
+// gradients: The backpropagated gradients to the corresponding softplus operation.
+// features: The features passed as input to the corresponding softplus operation.
//
-// value: A second seed to avoid seed collision.
-// If not specified, defaults to 0
-func SampleDistortedBoundingBoxSeed2(value int64) SampleDistortedBoundingBoxAttr {
- return func(m optionalAttr) {
- m["seed2"] = value
+// Returns The gradients: `gradients / (1 + exp(-features))`.
+func SoftplusGrad(scope *Scope, gradients tf.Output, features tf.Output) (backprops tf.Output) {
+ if scope.Err() != nil {
+ return
}
-}
-
-// SampleDistortedBoundingBoxMinObjectCovered sets the optional min_object_covered attribute to value.
-//
-// value: The cropped area of the image must contain at least this
-// fraction of any bounding box supplied. The value of this parameter should be
-// non-negative. In the case of 0, the cropped area does not need to overlap
-// any of the bounding boxes supplied.
-// If not specified, defaults to 0.1
-func SampleDistortedBoundingBoxMinObjectCovered(value float32) SampleDistortedBoundingBoxAttr {
- return func(m optionalAttr) {
- m["min_object_covered"] = value
+ opspec := tf.OpSpec{
+ Type: "SoftplusGrad",
+ Input: []tf.Input{
+ gradients, features,
+ },
}
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// SampleDistortedBoundingBoxAspectRatioRange sets the optional aspect_ratio_range attribute to value.
-//
-// value: The cropped area of the image must have an aspect ratio =
-// width / height within this range.
-// If not specified, defaults to <f:0.75 f:1.33 >
-func SampleDistortedBoundingBoxAspectRatioRange(value []float32) SampleDistortedBoundingBoxAttr {
- return func(m optionalAttr) {
- m["aspect_ratio_range"] = value
+// Creates a dataset that contains the unique elements of `input_dataset`.
+func UniqueDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
+ if scope.Err() != nil {
+ return
}
-}
-
-// SampleDistortedBoundingBoxAreaRange sets the optional area_range attribute to value.
-//
-// value: The cropped area of the image must contain a fraction of the
-// supplied image within in this range.
-// If not specified, defaults to <f:0.05 f:1 >
-func SampleDistortedBoundingBoxAreaRange(value []float32) SampleDistortedBoundingBoxAttr {
- return func(m optionalAttr) {
- m["area_range"] = value
+ attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
+ opspec := tf.OpSpec{
+ Type: "UniqueDataset",
+ Input: []tf.Input{
+ input_dataset,
+ },
+ Attrs: attrs,
}
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// SampleDistortedBoundingBoxMaxAttempts sets the optional max_attempts attribute to value.
-//
-// value: Number of attempts at generating a cropped region of the image
-// of the specified constraints. After `max_attempts` failures, return the entire
-// image.
-// If not specified, defaults to 100
-func SampleDistortedBoundingBoxMaxAttempts(value int64) SampleDistortedBoundingBoxAttr {
- return func(m optionalAttr) {
- m["max_attempts"] = value
- }
-}
+// SelfAdjointEigV2Attr is an optional argument to SelfAdjointEigV2.
+type SelfAdjointEigV2Attr func(optionalAttr)
-// SampleDistortedBoundingBoxUseImageIfNoBoundingBoxes sets the optional use_image_if_no_bounding_boxes attribute to value.
+// SelfAdjointEigV2ComputeV sets the optional compute_v attribute to value.
//
-// value: Controls behavior if no bounding boxes supplied.
-// If true, assume an implicit bounding box covering the whole input. If false,
-// raise an error.
-// If not specified, defaults to false
-func SampleDistortedBoundingBoxUseImageIfNoBoundingBoxes(value bool) SampleDistortedBoundingBoxAttr {
+// value: If `True` then eigenvectors will be computed and returned in `v`.
+// Otherwise, only the eigenvalues will be computed.
+// If not specified, defaults to true
+func SelfAdjointEigV2ComputeV(value bool) SelfAdjointEigV2Attr {
return func(m optionalAttr) {
- m["use_image_if_no_bounding_boxes"] = value
+ m["compute_v"] = value
}
}
-// Generate a single randomly distorted bounding box for an image.
-//
-// Bounding box annotations are often supplied in addition to ground-truth labels
-// in image recognition or object localization tasks. A common technique for
-// training such a system is to randomly distort an image while preserving
-// its content, i.e. *data augmentation*. This Op outputs a randomly distorted
-// localization of an object, i.e. bounding box, given an `image_size`,
-// `bounding_boxes` and a series of constraints.
-//
-// The output of this Op is a single bounding box that may be used to crop the
-// original image. The output is returned as 3 tensors: `begin`, `size` and
-// `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the
-// image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize
-// what the bounding box looks like.
-//
-// Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The
-// bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
-// height of the underlying image.
+// Computes the eigen decomposition of one or more square self-adjoint matrices.
//
-// For example,
+// Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in
+// `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`.
//
// ```python
-// # Generate a single distorted bounding box.
-// begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
-// tf.shape(image),
-// bounding_boxes=bounding_boxes)
-//
-// # Draw the bounding box in an image summary.
-// image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
-// bbox_for_draw)
-// tf.summary.image('images_with_box', image_with_box)
-//
-// # Employ the bounding box to distort the image.
-// distorted_image = tf.slice(image, begin, size)
+// # a is a tensor.
+// # e is a tensor of eigenvalues.
+// # v is a tensor of eigenvectors.
+// e, v = self_adjoint_eig(a)
+// e = self_adjoint_eig(a, compute_v=False)
// ```
//
-// Note that if no bounding box information is available, setting
-// `use_image_if_no_bounding_boxes = true` will assume there is a single implicit
-// bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is
-// false and no bounding boxes are supplied, an error is raised.
-//
// Arguments:
-// image_size: 1-D, containing `[height, width, channels]`.
-// bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes
-// associated with the image.
+// input: `Tensor` input of shape `[N, N]`.
//
-// Returns 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to
-// `tf.slice`.1-D, containing `[target_height, target_width, -1]`. Provide as input to
-// `tf.slice`.3-D with shape `[1, 1, 4]` containing the distorted bounding box.
-// Provide as input to `tf.image.draw_bounding_boxes`.
-func SampleDistortedBoundingBox(scope *Scope, image_size tf.Output, bounding_boxes tf.Output, optional ...SampleDistortedBoundingBoxAttr) (begin tf.Output, size tf.Output, bboxes tf.Output) {
+// Returns Eigenvalues. Shape is `[N]`.Eigenvectors. Shape is `[N, N]`.
+func SelfAdjointEigV2(scope *Scope, input tf.Output, optional ...SelfAdjointEigV2Attr) (e tf.Output, v tf.Output) {
if scope.Err() != nil {
return
}
@@ -17287,26 +17488,54 @@ func SampleDistortedBoundingBox(scope *Scope, image_size tf.Output, bounding_box
a(attrs)
}
opspec := tf.OpSpec{
- Type: "SampleDistortedBoundingBox",
+ Type: "SelfAdjointEigV2",
Input: []tf.Input{
- image_size, bounding_boxes,
+ input,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
+ return op.Output(0), op.Output(1)
}
-// Returns the truth value of (x > y) element-wise.
+// Adjust the saturation of one or more images.
//
-// *NOTE*: `Greater` supports broadcasting. More about broadcasting
-// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-func Greater(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
+// `images` is a tensor of at least 3 dimensions. The last dimension is
+// interpreted as channels, and must be three.
+//
+// The input image is considered in the RGB colorspace. Conceptually, the RGB
+// colors are first mapped into HSV. A scale is then applied to all the
+// saturation values, and the result is mapped back to RGB colorspace.
+//
+// Arguments:
+// images: Images to adjust. At least 3-D.
+// scale: A float scale to apply to the saturation.
+//
+// Returns The saturation-adjusted image or images.
+func AdjustSaturation(scope *Scope, images tf.Output, scale tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Greater",
+ Type: "AdjustSaturation",
+ Input: []tf.Input{
+ images, scale,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
+// Elementwise computes the bitwise OR of `x` and `y`.
+//
+// The result will have those bits set that are set in `x`, `y`, or both. The
+// computation is performed on the underlying representations of `x` and `y`.
+func BitwiseOr(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "BitwiseOr",
Input: []tf.Input{
x, y,
},
@@ -17315,47 +17544,65 @@ func Greater(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
return op.Output(0)
}
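+
+// A minimal sketch, assuming a Scope `s`; both operands must share an
+// integer dtype:
+//
+// ```go
+// x := Const(s.SubScope("x"), []int32{0, 5, 8})
+// y := Const(s.SubScope("y"), []int32{4, 5, 16})
+// z := BitwiseOr(s, x, y) // [4, 5, 24]
+// ```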
-// ResourceSparseApplyRMSPropAttr is an optional argument to ResourceSparseApplyRMSProp.
-type ResourceSparseApplyRMSPropAttr func(optionalAttr)
+// MatrixSolveLsAttr is an optional argument to MatrixSolveLs.
+type MatrixSolveLsAttr func(optionalAttr)
-// ResourceSparseApplyRMSPropUseLocking sets the optional use_locking attribute to value.
-//
-// value: If `True`, updating of the var, ms, and mom tensors is protected
-// by a lock; otherwise the behavior is undefined, but may exhibit less
-// contention.
-// If not specified, defaults to false
-func ResourceSparseApplyRMSPropUseLocking(value bool) ResourceSparseApplyRMSPropAttr {
+// MatrixSolveLsFast sets the optional fast attribute to value.
+// If not specified, defaults to true
+func MatrixSolveLsFast(value bool) MatrixSolveLsAttr {
return func(m optionalAttr) {
- m["use_locking"] = value
+ m["fast"] = value
}
}
-// Update '*var' according to the RMSProp algorithm.
+// Solves one or more linear least-squares problems.
//
-// Note that in dense implementation of this algorithm, ms and mom will
-// update even if the grad is zero, but in this sparse implementation, ms
-// and mom will not update in iterations during which the grad is zero.
+// `matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions
+// form real or complex matrices of size `[M, N]`. `Rhs` is a tensor of the same
+// type as `matrix` and shape `[..., M, K]`.
+// The output is a tensor of shape `[..., N, K]` where each output matrix solves
+// each of the equations
+// `matrix[..., :, :]` * `output[..., :, :]` = `rhs[..., :, :]`
+// in the least squares sense.
//
-// mean_square = decay * mean_square + (1-decay) * gradient ** 2
-// Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
+// We use the following notation for (complex) matrix and right-hand sides
+// in the batch:
//
-// ms <- rho * ms_{t-1} + (1-rho) * grad * grad
-// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
-// var <- var - mom
+// `matrix`=\\(A \in \mathbb{C}^{m \times n}\\),
+// `rhs`=\\(B \in \mathbb{C}^{m \times k}\\),
+// `output`=\\(X \in \mathbb{C}^{n \times k}\\),
+// `l2_regularizer`=\\(\lambda \in \mathbb{R}\\).
+//
+// If `fast` is `True`, then the solution is computed by solving the normal
+// equations using Cholesky decomposition. Specifically, if \\(m \ge n\\) then
+// \\(X = (A^H A + \lambda I)^{-1} A^H B\\), which solves the least-squares
+// problem \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k} } ||A Z - B||_F^2 +
+// \lambda ||Z||_F^2\\). If \\(m \lt n\\) then `output` is computed as
+// \\(X = A^H (A A^H + \lambda I)^{-1} B\\), which (for \\(\lambda = 0\\)) is the
+// minimum-norm solution to the under-determined linear system, i.e.
+// \\(X = \mathrm{argmin}_{Z \in \mathbb{C}^{n \times k} } ||Z||_F^2 \\),
+// subject to \\(A Z = B\\). Notice that the fast path is only numerically stable
+// when \\(A\\) is numerically full rank and has a condition number
+// \\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach} } }\\) or \\(\lambda\\) is
+// sufficiently large.
+//
+// If `fast` is `False` an algorithm based on the numerically robust complete
+// orthogonal decomposition is used. This computes the minimum-norm
+// least-squares solution, even when \\(A\\) is rank deficient. This path is
+// typically 6-7 times slower than the fast path. If `fast` is `False` then
+// `l2_regularizer` is ignored.
//
// Arguments:
-// var_: Should be from a Variable().
-// ms: Should be from a Variable().
-// mom: Should be from a Variable().
-// lr: Scaling factor. Must be a scalar.
-// rho: Decay rate. Must be a scalar.
+// matrix: Shape is `[..., M, N]`.
+// rhs: Shape is `[..., M, K]`.
+// l2_regularizer: Scalar tensor.
//
-// epsilon: Ridge term. Must be a scalar.
-// grad: The gradient.
-// indices: A vector of indices into the first dimension of var, ms and mom.
+// @compatibility(numpy)
+// Equivalent to np.linalg.lstsq
+// @end_compatibility
//
-// Returns the created operation.
-func ResourceSparseApplyRMSProp(scope *Scope, var_ tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyRMSPropAttr) (o *tf.Operation) {
+// Returns Shape is `[..., N, K]`.
+func MatrixSolveLs(scope *Scope, matrix tf.Output, rhs tf.Output, l2_regularizer tf.Output, optional ...MatrixSolveLsAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
@@ -17364,71 +17611,119 @@ func ResourceSparseApplyRMSProp(scope *Scope, var_ tf.Output, ms tf.Output, mom
a(attrs)
}
opspec := tf.OpSpec{
- Type: "ResourceSparseApplyRMSProp",
+ Type: "MatrixSolveLs",
Input: []tf.Input{
- var_, ms, mom, lr, rho, momentum, epsilon, grad, indices,
+ matrix, rhs, l2_regularizer,
},
Attrs: attrs,
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
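+
+// A minimal sketch of an overdetermined least-squares solve with a small
+// ridge term, assuming a Scope `s` and a float64 scalar for the
+// `l2_regularizer` input:
+//
+// ```go
+// a := Const(s.SubScope("a"), [][]float32{{1, 0}, {0, 1}, {1, 1}})
+// b := Const(s.SubScope("b"), [][]float32{{1}, {2}, {3}})
+// reg := Const(s.SubScope("reg"), float64(1e-6))
+// x := MatrixSolveLs(s, a, b, reg) // shape [2, 1]
+// ```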
-// Returns which elements of x are Inf.
+// SvdAttr is an optional argument to Svd.
+type SvdAttr func(optionalAttr)
+
+// SvdComputeUv sets the optional compute_uv attribute to value.
//
-// @compatibility(numpy)
-// Equivalent to np.isinf
-// @end_compatibility
-func IsInf(scope *Scope, x tf.Output) (y tf.Output) {
+// value: If true, left and right singular vectors will be
+// computed and returned in `u` and `v`, respectively.
+// If false, `u` and `v` are not set and should never be referenced.
+// If not specified, defaults to true
+func SvdComputeUv(value bool) SvdAttr {
+ return func(m optionalAttr) {
+ m["compute_uv"] = value
+ }
+}
+
+// SvdFullMatrices sets the optional full_matrices attribute to value.
+//
+// value: If true, compute full-sized `u` and `v`. If false
+// (the default), compute only the leading `P` singular vectors.
+// Ignored if `compute_uv` is `False`.
+// If not specified, defaults to false
+func SvdFullMatrices(value bool) SvdAttr {
+ return func(m optionalAttr) {
+ m["full_matrices"] = value
+ }
+}
+
+// Computes the singular value decompositions of one or more matrices.
+//
+// Computes the SVD of each inner matrix in `input` such that
+// `input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])`
+//
+// ```python
+// # a is a tensor containing a batch of matrices.
+// # s is a tensor of singular values for each matrix.
+// # u is the tensor containing the left singular vectors for each matrix.
+// # v is the tensor containing the right singular vectors for each matrix.
+// s, u, v = svd(a)
+// s, _, _ = svd(a, compute_uv=False)
+// ```
+//
+// Arguments:
+// input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
+// form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.
+//
+// Returns Singular values. Shape is `[..., P]`.Left singular vectors. If `full_matrices` is `False` then shape is
+// `[..., M, P]`; if `full_matrices` is `True` then shape is
+// `[..., M, M]`. Undefined if `compute_uv` is `False`.Right singular vectors. If `full_matrices` is `False` then shape is
+// `[..., N, P]`. If `full_matrices` is `True` then shape is `[..., N, N]`.
+// Undefined if `compute_uv` is false.
+func Svd(scope *Scope, input tf.Output, optional ...SvdAttr) (s tf.Output, u tf.Output, v tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "IsInf",
+ Type: "Svd",
Input: []tf.Input{
- x,
+ input,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1), op.Output(2)
}
-// ResourceSparseApplyFtrlAttr is an optional argument to ResourceSparseApplyFtrl.
-type ResourceSparseApplyFtrlAttr func(optionalAttr)
+// QueueEnqueueManyV2Attr is an optional argument to QueueEnqueueManyV2.
+type QueueEnqueueManyV2Attr func(optionalAttr)
-// ResourceSparseApplyFtrlUseLocking sets the optional use_locking attribute to value.
+// QueueEnqueueManyV2TimeoutMs sets the optional timeout_ms attribute to value.
//
-// value: If `True`, updating of the var and accum tensors will be protected
-// by a lock; otherwise the behavior is undefined, but may exhibit less
-// contention.
-// If not specified, defaults to false
-func ResourceSparseApplyFtrlUseLocking(value bool) ResourceSparseApplyFtrlAttr {
+// value: If the queue is too full, this operation will block for up
+// to timeout_ms milliseconds.
+// Note: This option is not supported yet.
+// If not specified, defaults to -1
+func QueueEnqueueManyV2TimeoutMs(value int64) QueueEnqueueManyV2Attr {
return func(m optionalAttr) {
- m["use_locking"] = value
+ m["timeout_ms"] = value
}
}
-// Update relevant entries in '*var' according to the Ftrl-proximal scheme.
+// Enqueues zero or more tuples of one or more tensors in the given queue.
//
-// That is for rows we have grad for, we update var, accum and linear as follows:
-// accum_new = accum + grad * grad
-// linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
-// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
-// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
-// accum = accum_new
+// This operation slices each component tensor along the 0th dimension to
+// make multiple queue elements. All of the tuple components must have the
+// same size in the 0th dimension.
+//
+// The components input has k elements, which correspond to the components of
+// tuples stored in the given queue.
+//
+// N.B. If the queue is full, this operation will block until the given
+// elements have been enqueued (or 'timeout_ms' elapses, if specified).
//
// Arguments:
-// var_: Should be from a Variable().
-// accum: Should be from a Variable().
-// linear: Should be from a Variable().
-// grad: The gradient.
-// indices: A vector of indices into the first dimension of var and accum.
-// lr: Scaling factor. Must be a scalar.
-// l1: L1 regularization. Must be a scalar.
-// l2: L2 regularization. Must be a scalar.
-// lr_power: Scaling factor. Must be a scalar.
+// handle: The handle to a queue.
+// components: One or more tensors from which the enqueued tensors should
+// be taken.
//
// Returns the created operation.
-func ResourceSparseApplyFtrl(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, indices tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, lr_power tf.Output, optional ...ResourceSparseApplyFtrlAttr) (o *tf.Operation) {
+func QueueEnqueueManyV2(scope *Scope, handle tf.Output, components []tf.Output, optional ...QueueEnqueueManyV2Attr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
@@ -17437,106 +17732,123 @@ func ResourceSparseApplyFtrl(scope *Scope, var_ tf.Output, accum tf.Output, line
a(attrs)
}
opspec := tf.OpSpec{
- Type: "ResourceSparseApplyFtrl",
+ Type: "QueueEnqueueManyV2",
Input: []tf.Input{
- var_, accum, linear, grad, indices, lr, l1, l2, lr_power,
+ handle, tf.OutputList(components),
},
Attrs: attrs,
}
return scope.AddOperation(opspec)
}
-// Component-wise multiplies a SparseTensor by a dense Tensor.
+// Computes the product along segments of a tensor.
//
-// The output locations corresponding to the implicitly zero elements in the sparse
-// tensor will be zero (i.e., will not take up storage space), regardless of the
-// contents of the dense tensor (even if it's +/-INF and that INF*0 == NaN).
+// Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
+// segments.
//
-// *Limitation*: this Op only broadcasts the dense side to the sparse side, but not
-// the other direction.
+// Computes a tensor such that
+// \\(output_i = \prod_j data_j\\) where the product is over `j` such
+// that `segment_ids[j] == i`.
+//
+// If the product is empty for a given segment ID `i`, `output[i] = 1`.
+//
+// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentProd.png" alt>
+// </div>
//
// Arguments:
-// sp_indices: 2-D. `N x R` matrix with the indices of non-empty values in a
-// SparseTensor, possibly not in canonical ordering.
-// sp_values: 1-D. `N` non-empty values corresponding to `sp_indices`.
-// sp_shape: 1-D. Shape of the input SparseTensor.
-// dense: `R`-D. The dense Tensor operand.
//
-// Returns 1-D. The `N` values that are operated on.
-func SparseDenseCwiseMul(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output, dense tf.Output) (output tf.Output) {
+// segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
+// first dimension. Values should be sorted and can be repeated.
+//
+// Returns Has same shape as data, except for dimension 0 which
+// has size `k`, the number of segments.
+func SegmentProd(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "SparseDenseCwiseMul",
+ Type: "SegmentProd",
Input: []tf.Input{
- sp_indices, sp_values, sp_shape, dense,
+ data, segment_ids,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
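+
+// A minimal usage sketch (the values are illustrative; op.NewScope and
+// op.Const are from this package):
+//
+// ```
+// s := op.NewScope()
+// data := op.Const(s, [][]int32{{1, 2}, {3, 4}, {5, 6}})
+// ids := op.Const(s, []int32{0, 0, 1})
+// prod := op.SegmentProd(s, data, ids) // -> [[3, 8], [5, 6]]
+// ```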
-// Creates a dataset that emits `components` as a tuple of tensors once.
-func TensorDataset(scope *Scope, components []tf.Output, output_shapes []tf.Shape) (handle tf.Output) {
+// Converts one or more images from RGB to HSV.
+//
+// Outputs a tensor of the same shape as the `images` tensor, containing the HSV
+// value of the pixels. The output is only well defined if the values in
+// `images` are in `[0,1]`.
+//
+// `output[..., 0]` contains hue, `output[..., 1]` contains saturation, and
+// `output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0
+// corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue.
+//
+// Arguments:
+// images: 1-D or higher rank. RGB data to convert. Last dimension must be size 3.
+//
+// Returns `images` converted to HSV.
+func RGBToHSV(scope *Scope, images tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"output_shapes": output_shapes}
opspec := tf.OpSpec{
- Type: "TensorDataset",
+ Type: "RGBToHSV",
Input: []tf.Input{
- tf.OutputList(components),
+ images,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
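+
+// A minimal sketch (assumes a scope `s`; the pixel values are illustrative
+// and lie in [0,1] as required):
+//
+// ```
+// rgb := op.Const(s, [][]float32{{1, 0, 0}}) // pure red
+// hsv := op.RGBToHSV(s, rgb)                 // -> [[0, 1, 1]]
+// ```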
-// NonMaxSuppressionAttr is an optional argument to NonMaxSuppression.
-type NonMaxSuppressionAttr func(optionalAttr)
+// Does nothing. Only useful as a placeholder for control edges.
+//
+// Returns the created operation.
+func NoOp(scope *Scope) (o *tf.Operation) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "NoOp",
+ }
+ return scope.AddOperation(opspec)
+}
-// NonMaxSuppressionIouThreshold sets the optional iou_threshold attribute to value.
+// MergeV2CheckpointsAttr is an optional argument to MergeV2Checkpoints.
+type MergeV2CheckpointsAttr func(optionalAttr)
+
+// MergeV2CheckpointsDeleteOldDirs sets the optional delete_old_dirs attribute to value.
//
-// value: A float representing the threshold for deciding whether boxes
-// overlap too much with respect to IOU.
-// If not specified, defaults to 0.5
-func NonMaxSuppressionIouThreshold(value float32) NonMaxSuppressionAttr {
+// value: if true, attempts to recursively delete the dirname of each path in
+// the input checkpoint_prefixes (see the op comment below).
+// If not specified, defaults to true
+func MergeV2CheckpointsDeleteOldDirs(value bool) MergeV2CheckpointsAttr {
return func(m optionalAttr) {
- m["iou_threshold"] = value
+ m["delete_old_dirs"] = value
}
}
-// Greedily selects a subset of bounding boxes in descending order of score,
+// V2 format specific: merges the metadata files of sharded checkpoints.
//
-// pruning away boxes that have high intersection-over-union (IOU) overlap
-// with previously selected boxes. Bounding boxes are supplied as
-// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
-// diagonal pair of box corners and the coordinates can be provided as normalized
-// (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm
-// is agnostic to where the origin is in the coordinate system. Note that this
-// algorithm is invariant to orthogonal transformations and translations
-// of the coordinate system; thus translating or reflections of the coordinate
-// system result in the same boxes being selected by the algorithm.
-// The output of this operation is a set of integers indexing into the input
-// collection of bounding boxes representing the selected boxes. The bounding
-// box coordinates corresponding to the selected indices can then be obtained
-// using the `tf.gather operation`. For example:
-// selected_indices = tf.image.non_max_suppression(
-// boxes, scores, max_output_size, iou_threshold)
-// selected_boxes = tf.gather(boxes, selected_indices)
+// The result is one logical checkpoint, with one physical metadata file and
+// renamed data files.
+//
+// Intended for "grouping" multiple checkpoints in a sharded checkpoint setup.
+//
+// If delete_old_dirs is true, the op attempts to recursively delete the
+// dirname of each path in the input checkpoint_prefixes. This is useful when
+// those paths are non-user-facing temporary locations.
//
// Arguments:
-// boxes: A 2-D float tensor of shape `[num_boxes, 4]`.
-// scores: A 1-D float tensor of shape `[num_boxes]` representing a single
-// score corresponding to each box (each row of boxes).
-// max_output_size: A scalar integer tensor representing the maximum number of
-// boxes to be selected by non max suppression.
+// checkpoint_prefixes: prefixes of V2 checkpoints to merge.
+// destination_prefix: scalar. The desired final prefix. Allowed to be the same
+// as one of the checkpoint_prefixes.
//
-// Returns A 1-D integer tensor of shape `[M]` representing the selected
-// indices from the boxes tensor, where `M <= max_output_size`.
-func NonMaxSuppression(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, optional ...NonMaxSuppressionAttr) (selected_indices tf.Output) {
+// Returns the created operation.
+func MergeV2Checkpoints(scope *Scope, checkpoint_prefixes tf.Output, destination_prefix tf.Output, optional ...MergeV2CheckpointsAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
@@ -17545,115 +17857,191 @@ func NonMaxSuppression(scope *Scope, boxes tf.Output, scores tf.Output, max_outp
a(attrs)
}
opspec := tf.OpSpec{
- Type: "NonMaxSuppression",
+ Type: "MergeV2Checkpoints",
Input: []tf.Input{
- boxes, scores, max_output_size,
+ checkpoint_prefixes, destination_prefix,
},
Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
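+
+// A minimal sketch (assumes a scope `s`; the prefix strings are placeholders):
+//
+// ```
+// prefixes := op.Const(s, []string{"/tmp/part-0", "/tmp/part-1"})
+// dest := op.Const(s, "/tmp/merged")
+// merge := op.MergeV2Checkpoints(s, prefixes, dest,
+//     op.MergeV2CheckpointsDeleteOldDirs(true))
+// ```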
-// ResourceApplyAdadeltaAttr is an optional argument to ResourceApplyAdadelta.
-type ResourceApplyAdadeltaAttr func(optionalAttr)
-
-// ResourceApplyAdadeltaUseLocking sets the optional use_locking attribute to value.
+// Saves input tensor slices to disk.
//
-// value: If True, updating of the var, accum and update_accum tensors will be protected by
-// a lock; otherwise the behavior is undefined, but may exhibit less contention.
-// If not specified, defaults to false
-func ResourceApplyAdadeltaUseLocking(value bool) ResourceApplyAdadeltaAttr {
+// This is like `Save` except that tensors can be listed in the saved file as being
+// a slice of a larger tensor. `shapes_and_slices` specifies the shape of the
+// larger tensor and the slice that this tensor covers. `shapes_and_slices` must
+// have as many elements as `tensor_names`.
+//
+// Elements of the `shapes_and_slices` input must either be:
+//
+// * The empty string, in which case the corresponding tensor is
+// saved normally.
+// * A string of the form `dim0 dim1 ... dimN-1 slice-spec` where the
+// `dimI` are the dimensions of the larger tensor and `slice-spec`
+// specifies what part is covered by the tensor to save.
+//
+// `slice-spec` itself is a `:`-separated list: `slice0:slice1:...:sliceN-1`
+// where each `sliceI` is either:
+//
+// * The string `-` meaning that the slice covers all indices of this dimension
+// * `start,length` where `start` and `length` are integers. In that
+// case the slice covers `length` indices starting at `start`.
+//
+// See also `Save`.
+//
+// Arguments:
+// filename: Must have a single element. The name of the file to which we write the
+// tensor.
+// tensor_names: Shape `[N]`. The names of the tensors to be saved.
+// shapes_and_slices: Shape `[N]`. The shapes and slice specifications to use when
+// saving the tensors.
+// data: `N` tensors to save.
+//
+// Returns the created operation.
+func SaveSlices(scope *Scope, filename tf.Output, tensor_names tf.Output, shapes_and_slices tf.Output, data []tf.Output) (o *tf.Operation) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "SaveSlices",
+ Input: []tf.Input{
+ filename, tensor_names, shapes_and_slices, tf.OutputList(data),
+ },
+ }
+ return scope.AddOperation(opspec)
+}
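+
+// A worked example of the slice syntax above (assumes a scope `s` and a 5x10
+// tensor `part`; paths and names are placeholders). The spec "10 10 0,5:-"
+// says the larger tensor is 10x10, the slice covers 5 rows starting at row 0,
+// and `-` covers every column:
+//
+// ```
+// filename := op.Const(s, "/tmp/slices.ckpt")
+// names := op.Const(s, []string{"weights"})
+// specs := op.Const(s, []string{"10 10 0,5:-"})
+// save := op.SaveSlices(s, filename, names, specs, []tf.Output{part})
+// ```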
+
+// DenseToDenseSetOperationAttr is an optional argument to DenseToDenseSetOperation.
+type DenseToDenseSetOperationAttr func(optionalAttr)
+
+// DenseToDenseSetOperationValidateIndices sets the optional validate_indices attribute to value.
+// If not specified, defaults to true
+func DenseToDenseSetOperationValidateIndices(value bool) DenseToDenseSetOperationAttr {
return func(m optionalAttr) {
- m["use_locking"] = value
+ m["validate_indices"] = value
}
}
-// Update '*var' according to the adadelta scheme.
+// Applies set operation along last dimension of 2 `Tensor` inputs.
//
-// accum = rho() * accum + (1 - rho()) * grad.square();
-// update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad;
-// update_accum = rho() * update_accum + (1 - rho()) * update.square();
-// var -= update;
+// See SetOperationOp::SetOperationFromContext for values of `set_operation`.
+//
+// Output `result` is a `SparseTensor` represented by `result_indices`,
+// `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
+// has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
+// dimension contains the result of `set_operation` applied to the corresponding
+// `[0...n-1]` dimension of `set`.
//
// Arguments:
-// var_: Should be from a Variable().
-// accum: Should be from a Variable().
-// accum_update: Should be from a Variable().
-// lr: Scaling factor. Must be a scalar.
-// rho: Decay factor. Must be a scalar.
-// epsilon: Constant factor. Must be a scalar.
-// grad: The gradient.
+// set1: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.
+// Dimension `n` contains values in a set, duplicates are allowed but ignored.
+// set2: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set1`.
+// Dimension `n` contains values in a set, duplicates are allowed but ignored.
//
-// Returns the created operation.
-func ResourceApplyAdadelta(scope *Scope, var_ tf.Output, accum tf.Output, accum_update tf.Output, lr tf.Output, rho tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyAdadeltaAttr) (o *tf.Operation) {
+//
+// Returns 2-D indices of a `SparseTensor`, 1-D values of a `SparseTensor`, and
+// a 1-D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is
+// the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`
+// is the max result set size across all `0...n-1` dimensions.
+func DenseToDenseSetOperation(scope *Scope, set1 tf.Output, set2 tf.Output, set_operation string, optional ...DenseToDenseSetOperationAttr) (result_indices tf.Output, result_values tf.Output, result_shape tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"set_operation": set_operation}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "ResourceApplyAdadelta",
+ Type: "DenseToDenseSetOperation",
Input: []tf.Input{
- var_, accum, accum_update, lr, rho, epsilon, grad,
+ set1, set2,
},
Attrs: attrs,
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0), op.Output(1), op.Output(2)
}
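+
+// A minimal sketch (assumes a scope `s`; the set values are illustrative):
+//
+// ```
+// set1 := op.Const(s, [][]int32{{1, 2, 3}})
+// set2 := op.Const(s, [][]int32{{2, 3, 4}})
+// idx, vals, shape := op.DenseToDenseSetOperation(s, set1, set2,
+//     "intersection") // vals -> [2, 3]
+// ```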
-// StageSizeAttr is an optional argument to StageSize.
-type StageSizeAttr func(optionalAttr)
-
-// StageSizeCapacity sets the optional capacity attribute to value.
-// If not specified, defaults to 0
+// Generate a sharded filename. The filename is printf formatted as
//
-// REQUIRES: value >= 0
-func StageSizeCapacity(value int64) StageSizeAttr {
- return func(m optionalAttr) {
- m["capacity"] = value
+// `%s-%05d-of-%05d`, filled in with basename, shard, and num_shards.
+func ShardedFilename(scope *Scope, basename tf.Output, shard tf.Output, num_shards tf.Output) (filename tf.Output) {
+ if scope.Err() != nil {
+ return
}
+ opspec := tf.OpSpec{
+ Type: "ShardedFilename",
+ Input: []tf.Input{
+ basename, shard, num_shards,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
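+
+// A worked example (illustrative values; assumes a scope `s`):
+//
+// ```
+// name := op.ShardedFilename(s, op.Const(s, "ckpt"),
+//     op.Const(s, int32(2)), op.Const(s, int32(10))) // -> "ckpt-00002-of-00010"
+// ```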
-// StageSizeMemoryLimit sets the optional memory_limit attribute to value.
-// If not specified, defaults to 0
+// Generate a glob pattern matching all sharded file names.
+func ShardedFilespec(scope *Scope, basename tf.Output, num_shards tf.Output) (filename tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "ShardedFilespec",
+ Input: []tf.Input{
+ basename, num_shards,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
+// TextLineReaderV2Attr is an optional argument to TextLineReaderV2.
+type TextLineReaderV2Attr func(optionalAttr)
+
+// TextLineReaderV2SkipHeaderLines sets the optional skip_header_lines attribute to value.
//
-// REQUIRES: value >= 0
-func StageSizeMemoryLimit(value int64) StageSizeAttr {
+// value: Number of lines to skip from the beginning of every file.
+// If not specified, defaults to 0
+func TextLineReaderV2SkipHeaderLines(value int64) TextLineReaderV2Attr {
return func(m optionalAttr) {
- m["memory_limit"] = value
+ m["skip_header_lines"] = value
}
}
-// StageSizeContainer sets the optional container attribute to value.
+// TextLineReaderV2Container sets the optional container attribute to value.
+//
+// value: If non-empty, this reader is placed in the given container.
+// Otherwise, a default container is used.
// If not specified, defaults to ""
-func StageSizeContainer(value string) StageSizeAttr {
+func TextLineReaderV2Container(value string) TextLineReaderV2Attr {
return func(m optionalAttr) {
m["container"] = value
}
}
-// StageSizeSharedName sets the optional shared_name attribute to value.
+// TextLineReaderV2SharedName sets the optional shared_name attribute to value.
+//
+// value: If non-empty, this reader is named in the given bucket
+// with this shared_name. Otherwise, the node name is used instead.
// If not specified, defaults to ""
-func StageSizeSharedName(value string) StageSizeAttr {
+func TextLineReaderV2SharedName(value string) TextLineReaderV2Attr {
return func(m optionalAttr) {
m["shared_name"] = value
}
}
-// Op returns the number of elements in the underlying container.
-func StageSize(scope *Scope, dtypes []tf.DataType, optional ...StageSizeAttr) (size tf.Output) {
+// A Reader that outputs the lines of a file delimited by '\n'.
+//
+// Returns The handle to reference the Reader.
+func TextLineReaderV2(scope *Scope, optional ...TextLineReaderV2Attr) (reader_handle tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dtypes": dtypes}
+ attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "StageSize",
+ Type: "TextLineReaderV2",
Attrs: attrs,
}
@@ -17661,132 +18049,173 @@ func StageSize(scope *Scope, dtypes []tf.DataType, optional ...StageSizeAttr) (s
return op.Output(0)
}
-// ResourceScatterNdUpdateAttr is an optional argument to ResourceScatterNdUpdate.
-type ResourceScatterNdUpdateAttr func(optionalAttr)
+// LoadAndRemapMatrixAttr is an optional argument to LoadAndRemapMatrix.
+type LoadAndRemapMatrixAttr func(optionalAttr)
-// ResourceScatterNdUpdateUseLocking sets the optional use_locking attribute to value.
+// LoadAndRemapMatrixMaxRowsInMemory sets the optional max_rows_in_memory attribute to value.
//
-// value: An optional bool. Defaults to True. If True, the assignment will
-// be protected by a lock; otherwise the behavior is undefined,
-// but may exhibit less contention.
-// If not specified, defaults to true
-func ResourceScatterNdUpdateUseLocking(value bool) ResourceScatterNdUpdateAttr {
+// value: The maximum number of rows to load from the checkpoint at
+// once. If less than or equal to 0, the entire matrix will be loaded into
+// memory. Setting this arg trades increased disk reads for lower memory usage.
+// If not specified, defaults to -1
+func LoadAndRemapMatrixMaxRowsInMemory(value int64) LoadAndRemapMatrixAttr {
return func(m optionalAttr) {
- m["use_locking"] = value
+ m["max_rows_in_memory"] = value
}
}
-// Applies sparse `updates` to individual values or slices within a given
-//
-// variable according to `indices`.
-//
-// `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
-//
-// `indices` must be integer tensor, containing indices into `ref`.
-// It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
+// Loads a 2-D (matrix) `Tensor` with name `old_tensor_name` from a checkpoint.
//
-// The innermost dimension of `indices` (with length `K`) corresponds to
-// indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
-// dimension of `ref`.
+// The tensor is read from `ckpt_path`, and its rows and columns are
+// potentially reordered using the specified remappings.
//
-// `updates` is `Tensor` of rank `Q-1+P-K` with shape:
+// Most users should use one of the wrapper initializers (such as
+// `tf.contrib.framework.load_and_remap_matrix_initializer`) instead of this
+// function directly.
//
-// ```
-// [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
-// ```
+// The remappings are 1-D tensors with the following properties:
//
-// For example, say we want to update 4 scattered elements to a rank-1 tensor to
-// 8 elements. In Python, that update would look like this:
+// * `row_remapping` must have exactly `num_rows` entries. Row `i` of the output
+// matrix will be initialized from the row corresponding to index
+// `row_remapping[i]` in the old `Tensor` from the checkpoint.
+// * `col_remapping` must have either 0 entries (indicating that no column
+// reordering is needed) or `num_cols` entries. If specified, column `j` of the
+// output matrix will be initialized from the column corresponding to index
+// `col_remapping[j]` in the old `Tensor` from the checkpoint.
+// * A value of -1 in either of the remappings signifies a "missing" entry. In that
+// case, values from the `initializing_values` tensor will be used to fill that
+// missing row or column. If `row_remapping` has `r` missing entries and
+// `col_remapping` has `c` missing entries, then the following condition must be
+// true:
//
-// ```python
-// ref = tfe.Variable([1, 2, 3, 4, 5, 6, 7, 8])
-// indices = tf.constant([[4], [3], [1] ,[7]])
-// updates = tf.constant([9, 10, 11, 12])
-// update = tf.scatter_nd_update(ref, indices, updates)
-// with tf.Session() as sess:
-// print sess.run(update)
-// ```
+// `(r * num_cols) + (c * num_rows) - (r * c) == len(initializing_values)`
//
-// The resulting update to ref would look like this:
+// The remapping tensors can be generated using the GenerateVocabRemapping op.
//
-// [1, 11, 3, 10, 9, 6, 7, 12]
+// As an example, with row_remapping = [1, 0, -1], col_remapping = [0, 2, -1],
+// initializing_values = [0.5, -0.5, 0.25, -0.25, 42], and w(i, j) representing
+// the value from row i, column j of the old tensor in the checkpoint, the output
+// matrix will look like the following:
//
-// See @{tf.scatter_nd} for more details about how to make updates to
-// slices.
+// [[w(1, 0), w(1, 2), 0.5],
+// [w(0, 0), w(0, 2), -0.5],
+// [0.25, -0.25, 42]]
//
// Arguments:
-// ref: A resource handle. Must be from a VarHandleOp.
-// indices: A Tensor. Must be one of the following types: int32, int64.
-// A tensor of indices into ref.
-// updates: A Tensor. Must have the same type as ref. A tensor of updated
-// values to add to ref.
+// ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`) from
+// which the old matrix `Tensor` will be loaded.
+// old_tensor_name: Name of the 2-D `Tensor` to load from checkpoint.
+// row_remapping: An int `Tensor` of row remappings (generally created by
+// `generate_vocab_remapping`). Even if no row remapping is needed, this must
+// still be an index-valued Tensor (e.g. [0, 1, 2, ...]), or a shifted
+// index-valued `Tensor` (e.g. [8, 9, 10, ...], for partitioned `Variables`).
+// col_remapping: An int `Tensor` of column remappings (generally created by
+// `generate_vocab_remapping`). May be a size-0 `Tensor` if only row remapping
+// is to be done (e.g. column ordering is the same).
+// initializing_values: A float `Tensor` containing values to fill in for cells
+// in the output matrix that are not loaded from the checkpoint. Length must be
+// exactly the same as the number of missing / new cells.
+// num_rows: Number of rows (length of the 1st dimension) in the output matrix.
+// num_cols: Number of columns (length of the 2nd dimension) in the output matrix.
//
-// Returns the created operation.
-func ResourceScatterNdUpdate(scope *Scope, ref tf.Output, indices tf.Output, updates tf.Output, optional ...ResourceScatterNdUpdateAttr) (o *tf.Operation) {
+// Returns Output matrix containing existing values loaded from the
+// checkpoint, and with any missing values filled in from initializing_values.
+func LoadAndRemapMatrix(scope *Scope, ckpt_path tf.Output, old_tensor_name tf.Output, row_remapping tf.Output, col_remapping tf.Output, initializing_values tf.Output, num_rows int64, num_cols int64, optional ...LoadAndRemapMatrixAttr) (output_matrix tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"num_rows": num_rows, "num_cols": num_cols}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "ResourceScatterNdUpdate",
+ Type: "LoadAndRemapMatrix",
Input: []tf.Input{
- ref, indices, updates,
+ ckpt_path, old_tensor_name, row_remapping, col_remapping, initializing_values,
},
Attrs: attrs,
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
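+
+// As a check of the size condition on the example above: `row_remapping` has
+// r = 1 missing entry and `col_remapping` has c = 1, with num_rows = 3 and
+// num_cols = 3, so (1 * 3) + (1 * 3) - (1 * 1) = 5, which matches the five
+// values in `initializing_values`.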
-// Computes the power of one value to another.
+// TFRecordReaderV2Attr is an optional argument to TFRecordReaderV2.
+type TFRecordReaderV2Attr func(optionalAttr)
+
+// TFRecordReaderV2Container sets the optional container attribute to value.
//
-// Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
-// corresponding elements in `x` and `y`. For example:
+// value: If non-empty, this reader is placed in the given container.
+// Otherwise, a default container is used.
+// If not specified, defaults to ""
+func TFRecordReaderV2Container(value string) TFRecordReaderV2Attr {
+ return func(m optionalAttr) {
+ m["container"] = value
+ }
+}
+
+// TFRecordReaderV2SharedName sets the optional shared_name attribute to value.
//
-// ```
-// # tensor 'x' is [[2, 2]], [3, 3]]
-// # tensor 'y' is [[8, 16], [2, 3]]
-// tf.pow(x, y) ==> [[256, 65536], [9, 27]]
-// ```
-func Pow(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
+// value: If non-empty, this reader is named in the given bucket
+// with this shared_name. Otherwise, the node name is used instead.
+// If not specified, defaults to ""
+func TFRecordReaderV2SharedName(value string) TFRecordReaderV2Attr {
+ return func(m optionalAttr) {
+ m["shared_name"] = value
+ }
+}
+
+// TFRecordReaderV2CompressionType sets the optional compression_type attribute to value.
+// If not specified, defaults to ""
+func TFRecordReaderV2CompressionType(value string) TFRecordReaderV2Attr {
+ return func(m optionalAttr) {
+ m["compression_type"] = value
+ }
+}
+
+// A Reader that outputs the records from a TensorFlow Records file.
+//
+// Returns The handle to reference the Reader.
+func TFRecordReaderV2(scope *Scope, optional ...TFRecordReaderV2Attr) (reader_handle tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "Pow",
- Input: []tf.Input{
- x, y,
- },
+ Type: "TFRecordReaderV2",
+
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// SizeAttr is an optional argument to Size.
-type SizeAttr func(optionalAttr)
+// QuantizeAndDequantizeV3Attr is an optional argument to QuantizeAndDequantizeV3.
+type QuantizeAndDequantizeV3Attr func(optionalAttr)
-// SizeOutType sets the optional out_type attribute to value.
-// If not specified, defaults to DT_INT32
-func SizeOutType(value tf.DataType) SizeAttr {
+// QuantizeAndDequantizeV3SignedInput sets the optional signed_input attribute to value.
+// If not specified, defaults to true
+func QuantizeAndDequantizeV3SignedInput(value bool) QuantizeAndDequantizeV3Attr {
return func(m optionalAttr) {
- m["out_type"] = value
+ m["signed_input"] = value
}
}
-// Returns the size of a tensor.
-//
-// This operation returns an integer representing the number of elements in
-// `input`.
-//
-// For example:
+// QuantizeAndDequantizeV3RangeGiven sets the optional range_given attribute to value.
+// If not specified, defaults to true
+func QuantizeAndDequantizeV3RangeGiven(value bool) QuantizeAndDequantizeV3Attr {
+ return func(m optionalAttr) {
+ m["range_given"] = value
+ }
+}
+
+// Quantizes then dequantizes a tensor.
//
-// ```
-// # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]]
-// size(t) ==> 12
-// ```
-func Size(scope *Scope, input tf.Output, optional ...SizeAttr) (output tf.Output) {
+// This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a
+// tensor, so its value can change during training.
+func QuantizeAndDequantizeV3(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, num_bits tf.Output, optional ...QuantizeAndDequantizeV3Attr) (output tf.Output) {
if scope.Err() != nil {
return
}
@@ -17795,9 +18224,9 @@ func Size(scope *Scope, input tf.Output, optional ...SizeAttr) (output tf.Output
a(attrs)
}
opspec := tf.OpSpec{
- Type: "Size",
+ Type: "QuantizeAndDequantizeV3",
Input: []tf.Input{
- input,
+ input, input_min, input_max, num_bits,
},
Attrs: attrs,
}
@@ -17805,46 +18234,38 @@ func Size(scope *Scope, input tf.Output, optional ...SizeAttr) (output tf.Output
return op.Output(0)
}
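+
+// A minimal sketch (assumes a scope `s`; the range and bit width are
+// illustrative):
+//
+// ```
+// x := op.Const(s, []float32{-1, 0, 0.5, 1})
+// out := op.QuantizeAndDequantizeV3(s, x,
+//     op.Const(s, float32(-1)), op.Const(s, float32(1)),
+//     op.Const(s, int32(8))) // num_bits as a tensor, adjustable in training
+// ```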
-// ResourceApplyRMSPropAttr is an optional argument to ResourceApplyRMSProp.
-type ResourceApplyRMSPropAttr func(optionalAttr)
+// IdentityReaderV2Attr is an optional argument to IdentityReaderV2.
+type IdentityReaderV2Attr func(optionalAttr)
-// ResourceApplyRMSPropUseLocking sets the optional use_locking attribute to value.
+// IdentityReaderV2Container sets the optional container attribute to value.
//
-// value: If `True`, updating of the var, ms, and mom tensors is protected
-// by a lock; otherwise the behavior is undefined, but may exhibit less
-// contention.
-// If not specified, defaults to false
-func ResourceApplyRMSPropUseLocking(value bool) ResourceApplyRMSPropAttr {
+// value: If non-empty, this reader is placed in the given container.
+// Otherwise, a default container is used.
+// If not specified, defaults to ""
+func IdentityReaderV2Container(value string) IdentityReaderV2Attr {
return func(m optionalAttr) {
- m["use_locking"] = value
+ m["container"] = value
}
}
-// Update '*var' according to the RMSProp algorithm.
-//
-// Note that in dense implementation of this algorithm, ms and mom will
-// update even if the grad is zero, but in this sparse implementation, ms
-// and mom will not update in iterations during which the grad is zero.
-//
-// mean_square = decay * mean_square + (1-decay) * gradient ** 2
-// Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
-//
-// ms <- rho * ms_{t-1} + (1-rho) * grad * grad
-// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
-// var <- var - mom
+// IdentityReaderV2SharedName sets the optional shared_name attribute to value.
//
-// Arguments:
-// var_: Should be from a Variable().
-// ms: Should be from a Variable().
-// mom: Should be from a Variable().
-// lr: Scaling factor. Must be a scalar.
-// rho: Decay rate. Must be a scalar.
+// value: If non-empty, this reader is named in the given bucket
+// with this shared_name. Otherwise, the node name is used instead.
+// If not specified, defaults to ""
+func IdentityReaderV2SharedName(value string) IdentityReaderV2Attr {
+ return func(m optionalAttr) {
+ m["shared_name"] = value
+ }
+}
+
+// A Reader that outputs the queued work as both the key and value.
//
-// epsilon: Ridge term. Must be a scalar.
-// grad: The gradient.
+// To use, enqueue strings in a Queue. ReaderRead will take the front
+// work string and output (work, work).
//
-// Returns the created operation.
-func ResourceApplyRMSProp(scope *Scope, var_ tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyRMSPropAttr) (o *tf.Operation) {
+// Returns The handle to reference the Reader.
+func IdentityReaderV2(scope *Scope, optional ...IdentityReaderV2Attr) (reader_handle tf.Output) {
if scope.Err() != nil {
return
}
@@ -17853,61 +18274,37 @@ func ResourceApplyRMSProp(scope *Scope, var_ tf.Output, ms tf.Output, mom tf.Out
a(attrs)
}
opspec := tf.OpSpec{
- Type: "ResourceApplyRMSProp",
- Input: []tf.Input{
- var_, ms, mom, lr, rho, momentum, epsilon, grad,
- },
+ Type: "IdentityReaderV2",
+
Attrs: attrs,
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// ResourceApplyAdamAttr is an optional argument to ResourceApplyAdam.
-type ResourceApplyAdamAttr func(optionalAttr)
+// ResourceApplyGradientDescentAttr is an optional argument to ResourceApplyGradientDescent.
+type ResourceApplyGradientDescentAttr func(optionalAttr)
-// ResourceApplyAdamUseLocking sets the optional use_locking attribute to value.
+// ResourceApplyGradientDescentUseLocking sets the optional use_locking attribute to value.
//
-// value: If `True`, updating of the var, m, and v tensors will be protected
-// by a lock; otherwise the behavior is undefined, but may exhibit less
-// contention.
+// value: If `True`, the subtraction will be protected by a lock;
+// otherwise the behavior is undefined, but may exhibit less contention.
// If not specified, defaults to false
-func ResourceApplyAdamUseLocking(value bool) ResourceApplyAdamAttr {
+func ResourceApplyGradientDescentUseLocking(value bool) ResourceApplyGradientDescentAttr {
return func(m optionalAttr) {
m["use_locking"] = value
}
}
-// ResourceApplyAdamUseNesterov sets the optional use_nesterov attribute to value.
-//
-// value: If `True`, uses the nesterov update.
-// If not specified, defaults to false
-func ResourceApplyAdamUseNesterov(value bool) ResourceApplyAdamAttr {
- return func(m optionalAttr) {
- m["use_nesterov"] = value
- }
-}
-
-// Update '*var' according to the Adam algorithm.
-//
-// lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t)
-// m_t <- beta1 * m_{t-1} + (1 - beta1) * g_t
-// v_t <- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t
-// variable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)
+// Update '*var' by subtracting 'alpha' * 'delta' from it.
//
// Arguments:
// var_: Should be from a Variable().
-// m: Should be from a Variable().
-// v: Should be from a Variable().
-// beta1_power: Must be a scalar.
-// beta2_power: Must be a scalar.
-// lr: Scaling factor. Must be a scalar.
-// beta1: Momentum factor. Must be a scalar.
-// beta2: Momentum factor. Must be a scalar.
-// epsilon: Ridge term. Must be a scalar.
-// grad: The gradient.
+// alpha: Scaling factor. Must be a scalar.
+// delta: The change.
//
// Returns the created operation.
-func ResourceApplyAdam(scope *Scope, var_ tf.Output, m tf.Output, v tf.Output, beta1_power tf.Output, beta2_power tf.Output, lr tf.Output, beta1 tf.Output, beta2 tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyAdamAttr) (o *tf.Operation) {
+func ResourceApplyGradientDescent(scope *Scope, var_ tf.Output, alpha tf.Output, delta tf.Output, optional ...ResourceApplyGradientDescentAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
@@ -17916,176 +18313,160 @@ func ResourceApplyAdam(scope *Scope, var_ tf.Output, m tf.Output, v tf.Output, b
a(attrs)
}
opspec := tf.OpSpec{
- Type: "ResourceApplyAdam",
+ Type: "ResourceApplyGradientDescent",
Input: []tf.Input{
- var_, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad,
+ var_, alpha, delta,
},
Attrs: attrs,
}
return scope.AddOperation(opspec)
}
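+
+// In other words, a single step computes var <- var - alpha * delta; for
+// example, with alpha = 0.1 and delta = 2.0, a variable holding 1.0 becomes
+// 0.8.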
-// 3D fast Fourier transform.
+// Returns the next record (key, value pair) produced by a Reader.
//
-// Computes the 3-dimensional discrete Fourier transform over the inner-most 3
-// dimensions of `input`.
+// Will dequeue from the input queue if necessary (e.g. when the
+// Reader needs to start reading from a new file since it has finished
+// with the previous file).
//
// Arguments:
-// input: A complex64 tensor.
-//
-// Returns A complex64 tensor of the same shape as `input`. The inner-most 3
-// dimensions of `input` are replaced with their 3D Fourier transform.
+// reader_handle: Handle to a Reader.
+// queue_handle: Handle to a Queue, with string work items.
//
-// @compatibility(numpy)
-// Equivalent to np.fft.fftn with 3 dimensions.
-// @end_compatibility
-func FFT3D(scope *Scope, input tf.Output) (output tf.Output) {
+// Returns a scalar key and a scalar value.
+func ReaderReadV2(scope *Scope, reader_handle tf.Output, queue_handle tf.Output) (key tf.Output, value tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "FFT3D",
+ Type: "ReaderReadV2",
Input: []tf.Input{
- input,
+ reader_handle, queue_handle,
},
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1)
}
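+
+// A minimal sketch (assumes a scope `s` and a string queue handle `q` that
+// has been populated with filenames elsewhere):
+//
+// ```
+// reader := op.TextLineReaderV2(s)
+// key, value := op.ReaderReadV2(s, reader, q) // next line of the current file
+// ```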
-// Deserialize `SparseTensor` objects.
-//
-// The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where
-// the last dimension stores serialized `SparseTensor` objects and the other N
-// dimensions (N >= 0) correspond to a batch. The ranks of the original
-// `SparseTensor` objects must all match. When the final `SparseTensor` is
-// created, its rank is the rank of the incoming `SparseTensor` objects plus N;
-// the sparse tensors have been concatenated along new dimensions, one for each
-// batch.
-//
-// The output `SparseTensor` object's shape values for the original dimensions
-// are the max across the input `SparseTensor` objects' shape values for the
-// corresponding dimensions. The new dimensions match the size of the batch.
-//
-// The input `SparseTensor` objects' indices are assumed ordered in
-// standard lexicographic order. If this is not the case, after this
-// step run `SparseReorder` to restore index ordering.
-//
-// For example, if the serialized input is a `[2 x 3]` matrix representing two
-// original `SparseTensor` objects:
-//
-// index = [ 0]
-// [10]
-// [20]
-// values = [1, 2, 3]
-// shape = [50]
-//
-// and
-//
-// index = [ 2]
-// [10]
-// values = [4, 5]
-// shape = [30]
-//
-// then the final deserialized `SparseTensor` will be:
+// Returns up to `num_records` (key, value) pairs produced by a Reader.
//
-// index = [0 0]
-// [0 10]
-// [0 20]
-// [1 2]
-// [1 10]
-// values = [1, 2, 3, 4, 5]
-// shape = [2 50]
+// Will dequeue from the input queue if necessary (e.g. when the
+// Reader needs to start reading from a new file since it has finished
+// with the previous file).
+// It may return fewer than `num_records` even before the last batch.
//
// Arguments:
-// serialized_sparse: The serialized `SparseTensor` objects. The last dimension
-// must have 3 columns.
-// dtype: The `dtype` of the serialized `SparseTensor` objects.
-func DeserializeSparse(scope *Scope, serialized_sparse tf.Output, dtype tf.DataType) (sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output) {
+// reader_handle: Handle to a `Reader`.
+// queue_handle: Handle to a `Queue`, with string work items.
+// num_records: number of records to read from `Reader`.
+//
+// Returns a 1-D tensor of keys and a 1-D tensor of values.
+func ReaderReadUpToV2(scope *Scope, reader_handle tf.Output, queue_handle tf.Output, num_records tf.Output) (keys tf.Output, values tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dtype": dtype}
opspec := tf.OpSpec{
- Type: "DeserializeSparse",
+ Type: "ReaderReadUpToV2",
Input: []tf.Input{
- serialized_sparse,
+ reader_handle, queue_handle, num_records,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
+ return op.Output(0), op.Output(1)
}
-// Elementwise computes the bitwise XOR of `x` and `y`.
+// Restore a Reader to its initial clean state.
//
-// The result will have those bits set, that are different in `x` and `y`. The
-// computation is performed on the underlying representations of `x` and `y`.
-func BitwiseXor(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
+// Arguments:
+// reader_handle: Handle to a Reader.
+//
+// Returns the created operation.
+func ReaderResetV2(scope *Scope, reader_handle tf.Output) (o *tf.Operation) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "BitwiseXor",
+ Type: "ReaderResetV2",
Input: []tf.Input{
- x, y,
+ reader_handle,
},
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
-// Creates a summary file writer accessible by the given resource handle.
+// Adjust the hue of one or more images.
+//
+// `images` is a tensor of at least 3 dimensions. The last dimension is
+// interpreted as channels, and must be three.
+//
+// The input image is considered in the RGB colorspace. Conceptually, the RGB
+// colors are first mapped into HSV. A delta is then applied to all the hue
+// values, and the result is mapped back to the RGB colorspace.
//
// Arguments:
-// writer: A handle to the summary writer resource
-// logdir: Directory where the event file will be written.
-// max_queue: Size of the queue of pending events and summaries.
-// flush_millis: How often, in milliseconds, to flush the pending events and
-// summaries to disk.
-// filename_suffix: Every event file's name is suffixed with this suffix.
+// images: Images to adjust. At least 3-D.
+// delta: A float delta to add to the hue.
//
-// Returns the created operation.
-func CreateSummaryFileWriter(scope *Scope, writer tf.Output, logdir tf.Output, max_queue tf.Output, flush_millis tf.Output, filename_suffix tf.Output) (o *tf.Operation) {
+// Returns The hue-adjusted image or images.
+func AdjustHue(scope *Scope, images tf.Output, delta tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "CreateSummaryFileWriter",
+ Type: "AdjustHue",
Input: []tf.Input{
- writer, logdir, max_queue, flush_millis, filename_suffix,
+ images, delta,
},
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// EncodeBase64Attr is an optional argument to EncodeBase64.
-type EncodeBase64Attr func(optionalAttr)
+// ResourceApplyAdamAttr is an optional argument to ResourceApplyAdam.
+type ResourceApplyAdamAttr func(optionalAttr)
-// EncodeBase64Pad sets the optional pad attribute to value.
+// ResourceApplyAdamUseLocking sets the optional use_locking attribute to value.
//
-// value: Bool whether padding is applied at the ends.
+// value: If `True`, updating of the var, m, and v tensors will be protected
+// by a lock; otherwise the behavior is undefined, but may exhibit less
+// contention.
// If not specified, defaults to false
-func EncodeBase64Pad(value bool) EncodeBase64Attr {
+func ResourceApplyAdamUseLocking(value bool) ResourceApplyAdamAttr {
return func(m optionalAttr) {
- m["pad"] = value
+ m["use_locking"] = value
}
}
-// Encode strings into web-safe base64 format.
+// ResourceApplyAdamUseNesterov sets the optional use_nesterov attribute to value.
//
-// Refer to the following article for more information on base64 format:
-// en.wikipedia.org/wiki/Base64. Base64 strings may have padding with '=' at the
-// end so that the encoded has length multiple of 4. See Padding section of the
-// link above.
+// value: If `True`, uses the nesterov update.
+// If not specified, defaults to false
+func ResourceApplyAdamUseNesterov(value bool) ResourceApplyAdamAttr {
+ return func(m optionalAttr) {
+ m["use_nesterov"] = value
+ }
+}
+
+// Update '*var' according to the Adam algorithm.
//
-// Web-safe means that the encoder uses - and _ instead of + and /.
+// lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t)
+// m_t <- beta1 * m_{t-1} + (1 - beta1) * g_t
+// v_t <- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t
+// variable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)
//
// Arguments:
-// input: Strings to be encoded.
+// var_: Should be from a Variable().
+// m: Should be from a Variable().
+// v: Should be from a Variable().
+// beta1_power: Must be a scalar.
+// beta2_power: Must be a scalar.
+// lr: Scaling factor. Must be a scalar.
+// beta1: Momentum factor. Must be a scalar.
+// beta2: Momentum factor. Must be a scalar.
+// epsilon: Ridge term. Must be a scalar.
+// grad: The gradient.
//
-// Returns Input strings encoded in base64.
-func EncodeBase64(scope *Scope, input tf.Output, optional ...EncodeBase64Attr) (output tf.Output) {
+// Returns the created operation.
+func ResourceApplyAdam(scope *Scope, var_ tf.Output, m tf.Output, v tf.Output, beta1_power tf.Output, beta2_power tf.Output, lr tf.Output, beta1 tf.Output, beta2 tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyAdamAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
@@ -18094,100 +18475,109 @@ func EncodeBase64(scope *Scope, input tf.Output, optional ...EncodeBase64Attr) (
a(attrs)
}
opspec := tf.OpSpec{
- Type: "EncodeBase64",
+ Type: "ResourceApplyAdam",
Input: []tf.Input{
- input,
+ var_, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad,
},
Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
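+
+// As a worked first step (illustrative values: beta1 = 0.9, beta2 = 0.999,
+// epsilon ~ 0, g_1 = 1): m_1 = 0.1, v_1 = 0.001, and
+// lr_1 = lr * sqrt(1 - 0.999) / (1 - 0.9) ~ 0.3162 * lr, so the update
+// lr_1 * m_1 / sqrt(v_1) ~ 0.3162 * lr * 0.1 / 0.0316 ~ lr: the
+// bias-corrected first step has magnitude close to `lr`.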
-// VarHandleOpAttr is an optional argument to VarHandleOp.
-type VarHandleOpAttr func(optionalAttr)
-
-// VarHandleOpContainer sets the optional container attribute to value.
+// Store the input tensor in the state of the current session.
//
-// value: the container this variable is placed in.
-// If not specified, defaults to ""
-func VarHandleOpContainer(value string) VarHandleOpAttr {
- return func(m optionalAttr) {
- m["container"] = value
- }
-}
-
-// VarHandleOpSharedName sets the optional shared_name attribute to value.
+// Arguments:
+// value: The tensor to be stored.
//
-// value: the name by which this variable is referred to.
-// If not specified, defaults to ""
-func VarHandleOpSharedName(value string) VarHandleOpAttr {
- return func(m optionalAttr) {
- m["shared_name"] = value
+// Returns The handle for the tensor stored in the session state, represented
+// as a ResourceHandle object.
+func GetSessionHandleV2(scope *Scope, value tf.Output) (handle tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "GetSessionHandleV2",
+ Input: []tf.Input{
+ value,
+ },
}
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// Creates a handle to a Variable resource.
+// Returns the set of files matching one or more glob patterns.
+//
+// Note that this routine only supports wildcard characters in the
+// basename portion of the pattern, not in the directory portion.
//
// Arguments:
-// dtype: the type of this variable. Must agree with the dtypes
-// of all ops using this variable.
-// shape: The (possibly partially specified) shape of this variable.
-func VarHandleOp(scope *Scope, dtype tf.DataType, shape tf.Shape, optional ...VarHandleOpAttr) (resource tf.Output) {
+// pattern: Shell wildcard pattern(s). Scalar or vector of type string.
+//
+// Returns A vector of matching filenames.
+func MatchingFiles(scope *Scope, pattern tf.Output) (filenames tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dtype": dtype, "shape": shape}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "VarHandleOp",
-
- Attrs: attrs,
+ Type: "MatchingFiles",
+ Input: []tf.Input{
+ pattern,
+ },
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
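+
+// A minimal sketch (assumes a scope `s`; the pattern is a placeholder):
+//
+// ```
+// files := op.MatchingFiles(s, op.Const(s, "/tmp/data/shard-*.txt"))
+// ```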
-// Output a fact about factorials.
-func Fact(scope *Scope) (fact tf.Output) {
+// Computes gradients for SparseSegmentMean.
+//
+// Returns tensor "output" with same shape as grad, except for dimension 0 whose
+// value is output_dim0.
+//
+// Arguments:
+// grad: gradient propagated to the SparseSegmentMean op.
+// indices: indices passed to the corresponding SparseSegmentMean op.
+// segment_ids: segment_ids passed to the corresponding SparseSegmentMean op.
+// output_dim0: dimension 0 of "data" passed to SparseSegmentMean op.
+func SparseSegmentMeanGrad(scope *Scope, grad tf.Output, indices tf.Output, segment_ids tf.Output, output_dim0 tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Fact",
+ Type: "SparseSegmentMeanGrad",
+ Input: []tf.Input{
+ grad, indices, segment_ids, output_dim0,
+ },
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// StatelessRandomUniformAttr is an optional argument to StatelessRandomUniform.
-type StatelessRandomUniformAttr func(optionalAttr)
+// SummaryWriterAttr is an optional argument to SummaryWriter.
+type SummaryWriterAttr func(optionalAttr)
-// StatelessRandomUniformDtype sets the optional dtype attribute to value.
-//
-// value: The type of the output.
-// If not specified, defaults to DT_FLOAT
-func StatelessRandomUniformDtype(value tf.DataType) StatelessRandomUniformAttr {
+// SummaryWriterSharedName sets the optional shared_name attribute to value.
+// If not specified, defaults to ""
+func SummaryWriterSharedName(value string) SummaryWriterAttr {
return func(m optionalAttr) {
- m["dtype"] = value
+ m["shared_name"] = value
}
}
-// Outputs deterministic pseudorandom random values from a uniform distribution.
-//
-// The generated values follow a uniform distribution in the range `[0, 1)`. The
-// lower bound 0 is included in the range, while the upper bound 1 is excluded.
-//
-// The outputs are a deterministic function of `shape` and `seed`.
+// SummaryWriterContainer sets the optional container attribute to value.
+// If not specified, defaults to ""
+func SummaryWriterContainer(value string) SummaryWriterAttr {
+ return func(m optionalAttr) {
+ m["container"] = value
+ }
+}
+
+// Returns a handle to be used to access a summary writer.
//
-// Arguments:
-// shape: The shape of the output tensor.
-// seed: 2 seeds (shape [2]).
+// The summary writer is an in-graph resource which can be used by ops to write
+// summaries to event files.
//
-// Returns Random values with specified shape.
-func StatelessRandomUniform(scope *Scope, shape tf.Output, seed tf.Output, optional ...StatelessRandomUniformAttr) (output tf.Output) {
+// Returns the summary writer resource. Scalar handle.
+func SummaryWriter(scope *Scope, optional ...SummaryWriterAttr) (writer tf.Output) {
if scope.Err() != nil {
return
}
@@ -18196,99 +18586,51 @@ func StatelessRandomUniform(scope *Scope, shape tf.Output, seed tf.Output, optio
a(attrs)
}
opspec := tf.OpSpec{
- Type: "StatelessRandomUniform",
- Input: []tf.Input{
- shape, seed,
- },
+ Type: "SummaryWriter",
+
Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// LoadAndRemapMatrixAttr is an optional argument to LoadAndRemapMatrix.
-type LoadAndRemapMatrixAttr func(optionalAttr)
+// ResizeBicubicGradAttr is an optional argument to ResizeBicubicGrad.
+type ResizeBicubicGradAttr func(optionalAttr)
-// LoadAndRemapMatrixMaxRowsInMemory sets the optional max_rows_in_memory attribute to value.
+// ResizeBicubicGradAlignCorners sets the optional align_corners attribute to value.
//
-// value: The maximum number of rows to load from the checkpoint at
-// once. If less than or equal to 0, the entire matrix will be loaded into
-// memory. Setting this arg trades increased disk reads for lower memory usage.
-// If not specified, defaults to -1
-func LoadAndRemapMatrixMaxRowsInMemory(value int64) LoadAndRemapMatrixAttr {
+// value: If true, rescale grads by (orig_height - 1) / (height - 1), which
+// exactly aligns the 4 corners of grads and original_image. If false, rescale by
+// orig_height / height. Treat similarly the width dimension.
+// If not specified, defaults to false
+func ResizeBicubicGradAlignCorners(value bool) ResizeBicubicGradAttr {
return func(m optionalAttr) {
- m["max_rows_in_memory"] = value
+ m["align_corners"] = value
}
}
-// Loads a 2-D (matrix) `Tensor` with name `old_tensor_name` from the checkpoint
-//
-// at `ckpt_path` and potentially reorders its rows and columns using the
-// specified remappings.
-//
-// Most users should use one of the wrapper initializers (such as
-// `tf.contrib.framework.load_and_remap_matrix_initializer`) instead of this
-// function directly.
-//
-// The remappings are 1-D tensors with the following properties:
-//
-// * `row_remapping` must have exactly `num_rows` entries. Row `i` of the output
-// matrix will be initialized from the row corresponding to index
-// `row_remapping[i]` in the old `Tensor` from the checkpoint.
-// * `col_remapping` must have either 0 entries (indicating that no column
-// reordering is needed) or `num_cols` entries. If specified, column `j` of the
-// output matrix will be initialized from the column corresponding to index
-// `col_remapping[j]` in the old `Tensor` from the checkpoint.
-// * A value of -1 in either of the remappings signifies a "missing" entry. In that
-// case, values from the `initializing_values` tensor will be used to fill that
-// missing row or column. If `row_remapping` has `r` missing entries and
-// `col_remapping` has `c` missing entries, then the following condition must be
-// true:
-//
-// `(r * num_cols) + (c * num_rows) - (r * c) == len(initializing_values)`
-//
-// The remapping tensors can be generated using the GenerateVocabRemapping op.
-//
-// As an example, with row_remapping = [1, 0, -1], col_remapping = [0, 2, -1],
-// initializing_values = [0.5, -0.5, 0.25, -0.25, 42], and w(i, j) representing
-// the value from row i, column j of the old tensor in the checkpoint, the output
-// matrix will look like the following:
-//
-// [[w(1, 0), w(1, 2), 0.5],
-// [w(0, 0), w(0, 2), -0.5],
-// [0.25, -0.25, 42]]
+// Computes the gradient of bicubic interpolation.
//
// Arguments:
-// ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`) from
-// which the old matrix `Tensor` will be loaded.
-// old_tensor_name: Name of the 2-D `Tensor` to load from checkpoint.
-// row_remapping: An int `Tensor` of row remappings (generally created by
-// `generate_vocab_remapping`). Even if no row remapping is needed, this must
-// still be an index-valued Tensor (e.g. [0, 1, 2, ...]), or a shifted
-// index-valued `Tensor` (e.g. [8, 9, 10, ...], for partitioned `Variables`).
-// col_remapping: An int `Tensor` of column remappings (generally created by
-// `generate_vocab_remapping`). May be a size-0 `Tensor` if only row remapping
-// is to be done (e.g. column ordering is the same).
-// initializing_values: A float `Tensor` containing values to fill in for cells
-// in the output matrix that are not loaded from the checkpoint. Length must be
-// exactly the same as the number of missing / new cells.
-// num_rows: Number of rows (length of the 1st dimension) in the output matrix.
-// num_cols: Number of columns (length of the 2nd dimension) in the output matrix.
+// grads: 4-D with shape `[batch, height, width, channels]`.
+// original_image: 4-D with shape `[batch, orig_height, orig_width, channels]`,
+// The image tensor that was resized.
//
-// Returns Output matrix containing existing values loaded from the
-// checkpoint, and with any missing values filled in from initializing_values.
-func LoadAndRemapMatrix(scope *Scope, ckpt_path tf.Output, old_tensor_name tf.Output, row_remapping tf.Output, col_remapping tf.Output, initializing_values tf.Output, num_rows int64, num_cols int64, optional ...LoadAndRemapMatrixAttr) (output_matrix tf.Output) {
+// Returns 4-D with shape `[batch, orig_height, orig_width, channels]`.
+// Gradients with respect to the input image. The original input image must be
+// of float or double type.
+func ResizeBicubicGrad(scope *Scope, grads tf.Output, original_image tf.Output, optional ...ResizeBicubicGradAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"num_rows": num_rows, "num_cols": num_cols}
+ attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "LoadAndRemapMatrix",
+ Type: "ResizeBicubicGrad",
Input: []tf.Input{
- ckpt_path, old_tensor_name, row_remapping, col_remapping, initializing_values,
+ grads, original_image,
},
Attrs: attrs,
}
@@ -18296,50 +18638,22 @@ func LoadAndRemapMatrix(scope *Scope, ckpt_path tf.Output, old_tensor_name tf.Ou
return op.Output(0)
}
-// Checks whether a resource handle-based variable has been initialized.
-//
-// Arguments:
-// resource: the input resource handle.
-//
-// Returns a scalar boolean which is true if the variable has been
-// initialized.
-func VarIsInitializedOp(scope *Scope, resource tf.Output) (is_initialized tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "VarIsInitializedOp",
- Input: []tf.Input{
- resource,
- },
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// ResizeAreaAttr is an optional argument to ResizeArea.
-type ResizeAreaAttr func(optionalAttr)
+// ResizeNearestNeighborAttr is an optional argument to ResizeNearestNeighbor.
+type ResizeNearestNeighborAttr func(optionalAttr)
-// ResizeAreaAlignCorners sets the optional align_corners attribute to value.
+// ResizeNearestNeighborAlignCorners sets the optional align_corners attribute to value.
//
// value: If true, rescale input by (new_height - 1) / (height - 1), which
// exactly aligns the 4 corners of images and resized images. If false, rescale
// by new_height / height. Treat similarly the width dimension.
// If not specified, defaults to false
-func ResizeAreaAlignCorners(value bool) ResizeAreaAttr {
+func ResizeNearestNeighborAlignCorners(value bool) ResizeNearestNeighborAttr {
return func(m optionalAttr) {
m["align_corners"] = value
}
}
-// Resize `images` to `size` using area interpolation.
-//
-// Input images can be of different types but output images are always float.
-//
-// Each output pixel is computed by first transforming the pixel's footprint into
-// the input tensor and then averaging the pixels that intersect the footprint. An
-// input pixel's contribution to the average is weighted by the fraction of its
-// area that intersects the footprint. This is the same as OpenCV's INTER_AREA.
+// Resize `images` to `size` using nearest neighbor interpolation.
//
// Arguments:
// images: 4-D with shape `[batch, height, width, channels]`.
@@ -18348,7 +18662,7 @@ func ResizeAreaAlignCorners(value bool) ResizeAreaAttr {
//
// Returns 4-D with shape
// `[batch, new_height, new_width, channels]`.
-func ResizeArea(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeAreaAttr) (resized_images tf.Output) {
+func ResizeNearestNeighbor(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeNearestNeighborAttr) (resized_images tf.Output) {
if scope.Err() != nil {
return
}
@@ -18357,7 +18671,7 @@ func ResizeArea(scope *Scope, images tf.Output, size tf.Output, optional ...Resi
a(attrs)
}
opspec := tf.OpSpec{
- Type: "ResizeArea",
+ Type: "ResizeNearestNeighbor",
Input: []tf.Input{
images, size,
},
@@ -18367,31 +18681,31 @@ func ResizeArea(scope *Scope, images tf.Output, size tf.Output, optional ...Resi
return op.Output(0)
}
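+
+// A minimal sketch (assumes a scope `s` and a 4-D image tensor `images`):
+//
+// ```
+// size := op.Const(s, []int32{224, 224})
+// resized := op.ResizeNearestNeighbor(s, images, size,
+//     op.ResizeNearestNeighborAlignCorners(false))
+// ```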
-// RealAttr is an optional argument to Real.
-type RealAttr func(optionalAttr)
+// ResizeNearestNeighborGradAttr is an optional argument to ResizeNearestNeighborGrad.
+type ResizeNearestNeighborGradAttr func(optionalAttr)
-// RealTout sets the optional Tout attribute to value.
-// If not specified, defaults to DT_FLOAT
-func RealTout(value tf.DataType) RealAttr {
+// ResizeNearestNeighborGradAlignCorners sets the optional align_corners attribute to value.
+//
+// value: If true, rescale grads by (orig_height - 1) / (height - 1), which
+// exactly aligns the 4 corners of grads and original_image. If false, rescale by
+// orig_height / height. Treat similarly the width dimension.
+// If not specified, defaults to false
+func ResizeNearestNeighborGradAlignCorners(value bool) ResizeNearestNeighborGradAttr {
return func(m optionalAttr) {
- m["Tout"] = value
+ m["align_corners"] = value
}
}
-// Returns the real part of a complex number.
-//
-// Given a tensor `input` of complex numbers, this operation returns a tensor of
-// type `float` that is the real part of each element in `input`. All elements in
-// `input` must be complex numbers of the form \\(a + bj\\), where *a* is the real
-// part returned by this operation and *b* is the imaginary part.
+// Computes the gradient of nearest neighbor interpolation.
//
-// For example:
+// Arguments:
+// grads: 4-D with shape `[batch, height, width, channels]`.
+// size: A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The
+// original input size.
//
-// ```
-// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
-// tf.real(input) ==> [-2.25, 3.25]
-// ```
-func Real(scope *Scope, input tf.Output, optional ...RealAttr) (output tf.Output) {
+// Returns 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients
+// with respect to the input image.
+func ResizeNearestNeighborGrad(scope *Scope, grads tf.Output, size tf.Output, optional ...ResizeNearestNeighborGradAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
@@ -18400,9 +18714,9 @@ func Real(scope *Scope, input tf.Output, optional ...RealAttr) (output tf.Output
a(attrs)
}
opspec := tf.OpSpec{
- Type: "Real",
+ Type: "ResizeNearestNeighborGrad",
Input: []tf.Input{
- input,
+ grads, size,
},
Attrs: attrs,
}
@@ -18410,76 +18724,103 @@ func Real(scope *Scope, input tf.Output, optional ...RealAttr) (output tf.Output
return op.Output(0)
}
-// 2D real-valued fast Fourier transform.
-//
-// Computes the 2-dimensional discrete Fourier transform of a real-valued signal
-// over the inner-most 2 dimensions of `input`.
-//
-// Since the DFT of a real signal is Hermitian-symmetric, `RFFT2D` only returns the
-// `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension
-// of `output`: the zero-frequency term, followed by the `fft_length / 2`
-// positive-frequency terms.
-//
-// Along each axis `RFFT2D` is computed on, if `fft_length` is smaller than the
-// corresponding dimension of `input`, the dimension is cropped. If it is larger,
-// the dimension is padded with zeros.
+// DecodeJpegAttr is an optional argument to DecodeJpeg.
+type DecodeJpegAttr func(optionalAttr)
+
+// DecodeJpegChannels sets the optional channels attribute to value.
//
-// Arguments:
-// input: A float32 tensor.
-// fft_length: An int32 tensor of shape [2]. The FFT length for each dimension.
+// value: Number of color channels for the decoded image.
+// If not specified, defaults to 0
+func DecodeJpegChannels(value int64) DecodeJpegAttr {
+ return func(m optionalAttr) {
+ m["channels"] = value
+ }
+}
+
+// DecodeJpegRatio sets the optional ratio attribute to value.
//
-// Returns A complex64 tensor of the same rank as `input`. The inner-most 2
-// dimensions of `input` are replaced with their 2D Fourier transform. The
-// inner-most dimension contains `fft_length / 2 + 1` unique frequency
-// components.
+// value: Downscaling ratio.
+// If not specified, defaults to 1
+func DecodeJpegRatio(value int64) DecodeJpegAttr {
+ return func(m optionalAttr) {
+ m["ratio"] = value
+ }
+}
+
+// DecodeJpegFancyUpscaling sets the optional fancy_upscaling attribute to value.
//
-// @compatibility(numpy)
-// Equivalent to np.fft.rfft2
-// @end_compatibility
-func RFFT2D(scope *Scope, input tf.Output, fft_length tf.Output) (output tf.Output) {
- if scope.Err() != nil {
- return
+// value: If true, use a slower but nicer upscaling of the
+// chroma planes (yuv420/422 only).
+// If not specified, defaults to true
+func DecodeJpegFancyUpscaling(value bool) DecodeJpegAttr {
+ return func(m optionalAttr) {
+ m["fancy_upscaling"] = value
}
- opspec := tf.OpSpec{
- Type: "RFFT2D",
- Input: []tf.Input{
- input, fft_length,
- },
+}
+
+// DecodeJpegTryRecoverTruncated sets the optional try_recover_truncated attribute to value.
+//
+// value: If true, try to recover an image from truncated input.
+// If not specified, defaults to false
+func DecodeJpegTryRecoverTruncated(value bool) DecodeJpegAttr {
+ return func(m optionalAttr) {
+ m["try_recover_truncated"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// ResourceSparseApplyAdagradAttr is an optional argument to ResourceSparseApplyAdagrad.
-type ResourceSparseApplyAdagradAttr func(optionalAttr)
+// DecodeJpegAcceptableFraction sets the optional acceptable_fraction attribute to value.
+//
+// value: The minimum required fraction of lines before a truncated
+// input is accepted.
+// If not specified, defaults to 1
+func DecodeJpegAcceptableFraction(value float32) DecodeJpegAttr {
+ return func(m optionalAttr) {
+ m["acceptable_fraction"] = value
+ }
+}
-// ResourceSparseApplyAdagradUseLocking sets the optional use_locking attribute to value.
+// DecodeJpegDctMethod sets the optional dct_method attribute to value.
//
-// value: If `True`, updating of the var and accum tensors will be protected
-// by a lock; otherwise the behavior is undefined, but may exhibit less
-// contention.
-// If not specified, defaults to false
-func ResourceSparseApplyAdagradUseLocking(value bool) ResourceSparseApplyAdagradAttr {
+// value: string specifying a hint about the algorithm used for
+// decompression. Defaults to "" which maps to a system-specific
+// default. Currently valid values are ["INTEGER_FAST",
+// "INTEGER_ACCURATE"]. The hint may be ignored (e.g., the internal
+// jpeg library changes to a version that does not have that specific
+// option.)
+// If not specified, defaults to ""
+func DecodeJpegDctMethod(value string) DecodeJpegAttr {
return func(m optionalAttr) {
- m["use_locking"] = value
+ m["dct_method"] = value
}
}
-// Update relevant entries in '*var' and '*accum' according to the adagrad scheme.
+// Decode a JPEG-encoded image to a uint8 tensor.
//
-// That is for rows we have grad for, we update var and accum as follows:
-// accum += grad * grad
-// var -= lr * grad * (1 / sqrt(accum))
+// The attr `channels` indicates the desired number of color channels for the
+// decoded image.
+//
+// Accepted values are:
+//
+// * 0: Use the number of channels in the JPEG-encoded image.
+// * 1: output a grayscale image.
+// * 3: output an RGB image.
+//
+// If needed, the JPEG-encoded image is transformed to match the requested number
+// of color channels.
+//
+// The attr `ratio` allows downscaling the image by an integer factor during
+// decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than
+// downscaling the image later.
+//
+// This op also supports decoding PNGs and non-animated GIFs since the interface is
+// the same, though it is cleaner to use `tf.image.decode_image`.
//
// Arguments:
-// var_: Should be from a Variable().
-// accum: Should be from a Variable().
-// lr: Learning rate. Must be a scalar.
-// grad: The gradient.
-// indices: A vector of indices into the first dimension of var and accum.
+// contents: 0-D. The JPEG-encoded image.
//
-// Returns the created operation.
-func ResourceSparseApplyAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyAdagradAttr) (o *tf.Operation) {
+// Returns 3-D with shape `[height, width, channels]`.
+func DecodeJpeg(scope *Scope, contents tf.Output, optional ...DecodeJpegAttr) (image tf.Output) {
if scope.Err() != nil {
return
}
@@ -18488,25 +18829,50 @@ func ResourceSparseApplyAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, l
a(attrs)
}
opspec := tf.OpSpec{
- Type: "ResourceSparseApplyAdagrad",
+ Type: "DecodeJpeg",
Input: []tf.Input{
- var_, accum, lr, grad, indices,
+ contents,
},
Attrs: attrs,
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// Creates a dataset that zips together `input_datasets`.
-func ZipDataset(scope *Scope, input_datasets []tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
+// ExtractJpegShapeAttr is an optional argument to ExtractJpegShape.
+type ExtractJpegShapeAttr func(optionalAttr)
+
+// ExtractJpegShapeOutputType sets the optional output_type attribute to value.
+//
+// value: (Optional) The output type of the operation (int32 or int64).
+// Defaults to int32.
+// If not specified, defaults to DT_INT32
+func ExtractJpegShapeOutputType(value tf.DataType) ExtractJpegShapeAttr {
+ return func(m optionalAttr) {
+ m["output_type"] = value
+ }
+}
+
+// Extract the shape information of a JPEG-encoded image.
+//
+// This op only parses the image header, so it is much faster than DecodeJpeg.
+//
+// Arguments:
+// contents: 0-D. The JPEG-encoded image.
+//
+// Returns 1-D. The image shape with format [height, width, channels].
+func ExtractJpegShape(scope *Scope, contents tf.Output, optional ...ExtractJpegShapeAttr) (image_shape tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "ZipDataset",
+ Type: "ExtractJpegShape",
Input: []tf.Input{
- tf.OutputList(input_datasets),
+ contents,
},
Attrs: attrs,
}
@@ -18514,165 +18880,132 @@ func ZipDataset(scope *Scope, input_datasets []tf.Output, output_types []tf.Data
return op.Output(0)
}
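
Since this op parses only the header, it suits cheap validation passes. An illustrative sketch (same imports as above):

```go
// jpegShape returns a 1-D [height, width, channels] tensor without
// decoding the pixel data; int64 output is requested via the attr.
func jpegShape(s *op.Scope, contents tf.Output) tf.Output {
	return op.ExtractJpegShape(s, contents,
		op.ExtractJpegShapeOutputType(tf.Int64))
}
```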
-// MutableDenseHashTableV2Attr is an optional argument to MutableDenseHashTableV2.
-type MutableDenseHashTableV2Attr func(optionalAttr)
+// PaddingFIFOQueueV2Attr is an optional argument to PaddingFIFOQueueV2.
+type PaddingFIFOQueueV2Attr func(optionalAttr)
-// MutableDenseHashTableV2Container sets the optional container attribute to value.
+// PaddingFIFOQueueV2Shapes sets the optional shapes attribute to value.
//
-// value: If non-empty, this table is placed in the given container.
-// Otherwise, a default container is used.
-// If not specified, defaults to ""
-func MutableDenseHashTableV2Container(value string) MutableDenseHashTableV2Attr {
- return func(m optionalAttr) {
- m["container"] = value
- }
-}
-
-// MutableDenseHashTableV2SharedName sets the optional shared_name attribute to value.
+// value: The shape of each component in a value. The length of this attr must
+// be either 0 or the same as the length of component_types.
+// Shapes of fixed rank but variable size are allowed by setting
+// any shape dimension to -1. In this case, the inputs' shape may vary along
+// the given dimension, and DequeueMany will pad the given dimension with
+// zeros up to the maximum shape of all elements in the given batch.
+// If the length of this attr is 0, different queue elements may have
+// different ranks and shapes, but only one element may be dequeued at a time.
+// If not specified, defaults to <>
//
-// value: If non-empty, this table is shared under the given name across
-// multiple sessions.
-// If not specified, defaults to ""
-func MutableDenseHashTableV2SharedName(value string) MutableDenseHashTableV2Attr {
- return func(m optionalAttr) {
- m["shared_name"] = value
- }
-}
-
-// MutableDenseHashTableV2UseNodeNameSharing sets the optional use_node_name_sharing attribute to value.
-// If not specified, defaults to false
-func MutableDenseHashTableV2UseNodeNameSharing(value bool) MutableDenseHashTableV2Attr {
+// REQUIRES: len(value) >= 0
+func PaddingFIFOQueueV2Shapes(value []tf.Shape) PaddingFIFOQueueV2Attr {
return func(m optionalAttr) {
- m["use_node_name_sharing"] = value
+ m["shapes"] = value
}
}
-// MutableDenseHashTableV2ValueShape sets the optional value_shape attribute to value.
+// PaddingFIFOQueueV2Capacity sets the optional capacity attribute to value.
//
-// value: The shape of each value.
-// If not specified, defaults to <>
-func MutableDenseHashTableV2ValueShape(value tf.Shape) MutableDenseHashTableV2Attr {
+// value: The upper bound on the number of elements in this queue.
+// Negative numbers mean no limit.
+// If not specified, defaults to -1
+func PaddingFIFOQueueV2Capacity(value int64) PaddingFIFOQueueV2Attr {
return func(m optionalAttr) {
- m["value_shape"] = value
+ m["capacity"] = value
}
}
-// MutableDenseHashTableV2InitialNumBuckets sets the optional initial_num_buckets attribute to value.
+// PaddingFIFOQueueV2Container sets the optional container attribute to value.
//
-// value: The initial number of hash table buckets. Must be a power
-// to 2.
-// If not specified, defaults to 131072
-func MutableDenseHashTableV2InitialNumBuckets(value int64) MutableDenseHashTableV2Attr {
+// value: If non-empty, this queue is placed in the given container.
+// Otherwise, a default container is used.
+// If not specified, defaults to ""
+func PaddingFIFOQueueV2Container(value string) PaddingFIFOQueueV2Attr {
return func(m optionalAttr) {
- m["initial_num_buckets"] = value
+ m["container"] = value
}
}
-// MutableDenseHashTableV2MaxLoadFactor sets the optional max_load_factor attribute to value.
+// PaddingFIFOQueueV2SharedName sets the optional shared_name attribute to value.
//
-// value: The maximum ratio between number of entries and number of
-// buckets before growing the table. Must be between 0 and 1.
-// If not specified, defaults to 0.8
-func MutableDenseHashTableV2MaxLoadFactor(value float32) MutableDenseHashTableV2Attr {
+// value: If non-empty, this queue will be shared under the given name
+// across multiple sessions.
+// If not specified, defaults to ""
+func PaddingFIFOQueueV2SharedName(value string) PaddingFIFOQueueV2Attr {
return func(m optionalAttr) {
- m["max_load_factor"] = value
+ m["shared_name"] = value
}
}
-// Creates an empty hash table that uses tensors as the backing store.
-//
-// It uses "open addressing" with quadratic reprobing to resolve
-// collisions.
+// A queue that produces elements in first-in first-out order.
//
-// This op creates a mutable hash table, specifying the type of its keys and
-// values. Each value must be a scalar. Data can be inserted into the table using
-// the insert operations. It does not support the initialization operation.
+// Variable-size shapes are allowed by setting the corresponding shape dimensions
+// to -1 in the shape attr. In this case DequeueMany will pad up to the maximum
+// size of any given element in the minibatch. See below for details.
//
// Arguments:
-// empty_key: The key used to represent empty key buckets internally. Must not
-// be used in insert or lookup operations.
-// value_dtype: Type of the table values.
+// component_types: The type of each component in a value.
//
-// Returns Handle to a table.
-func MutableDenseHashTableV2(scope *Scope, empty_key tf.Output, value_dtype tf.DataType, optional ...MutableDenseHashTableV2Attr) (table_handle tf.Output) {
+// Returns The handle to the queue.
+func PaddingFIFOQueueV2(scope *Scope, component_types []tf.DataType, optional ...PaddingFIFOQueueV2Attr) (handle tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"value_dtype": value_dtype}
+ attrs := map[string]interface{}{"component_types": component_types}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "MutableDenseHashTableV2",
- Input: []tf.Input{
- empty_key,
- },
+ Type: "PaddingFIFOQueueV2",
+
Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
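
A sketch of building a padding queue for variable-length float vectors (helper name and capacity are illustrative assumptions; same imports as above):

```go
// paddedQueue holds float32 vectors of varying length; DequeueMany
// zero-pads the -1 dimension up to the longest element in each batch.
func paddedQueue(s *op.Scope) tf.Output {
	return op.PaddingFIFOQueueV2(s,
		[]tf.DataType{tf.Float},
		op.PaddingFIFOQueueV2Shapes([]tf.Shape{tf.MakeShape(-1)}),
		op.PaddingFIFOQueueV2Capacity(32),
	)
}
```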
-// LRNAttr is an optional argument to LRN.
-type LRNAttr func(optionalAttr)
+// DecodePngAttr is an optional argument to DecodePng.
+type DecodePngAttr func(optionalAttr)
-// LRNDepthRadius sets the optional depth_radius attribute to value.
+// DecodePngChannels sets the optional channels attribute to value.
//
-// value: 0-D. Half-width of the 1-D normalization window.
-// If not specified, defaults to 5
-func LRNDepthRadius(value int64) LRNAttr {
+// value: Number of color channels for the decoded image.
+// If not specified, defaults to 0
+func DecodePngChannels(value int64) DecodePngAttr {
return func(m optionalAttr) {
- m["depth_radius"] = value
+ m["channels"] = value
}
}
-// LRNBias sets the optional bias attribute to value.
-//
-// value: An offset (usually positive to avoid dividing by 0).
-// If not specified, defaults to 1
-func LRNBias(value float32) LRNAttr {
+// DecodePngDtype sets the optional dtype attribute to value.
+// If not specified, defaults to DT_UINT8
+func DecodePngDtype(value tf.DataType) DecodePngAttr {
return func(m optionalAttr) {
- m["bias"] = value
+ m["dtype"] = value
}
}
-// LRNAlpha sets the optional alpha attribute to value.
+// Decode a PNG-encoded image to a uint8 or uint16 tensor.
//
-// value: A scale factor, usually positive.
-// If not specified, defaults to 1
-func LRNAlpha(value float32) LRNAttr {
- return func(m optionalAttr) {
- m["alpha"] = value
- }
-}
-
-// LRNBeta sets the optional beta attribute to value.
+// The attr `channels` indicates the desired number of color channels for the
+// decoded image.
//
-// value: An exponent.
-// If not specified, defaults to 0.5
-func LRNBeta(value float32) LRNAttr {
- return func(m optionalAttr) {
- m["beta"] = value
- }
-}
-
-// Local Response Normalization.
+// Accepted values are:
//
-// The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last
-// dimension), and each vector is normalized independently. Within a given vector,
-// each component is divided by the weighted, squared sum of inputs within
-// `depth_radius`. In detail,
+// * 0: Use the number of channels in the PNG-encoded image.
+// * 1: output a grayscale image.
+// * 3: output an RGB image.
+// * 4: output an RGBA image.
//
-// sqr_sum[a, b, c, d] =
-// sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
-// output = input / (bias + alpha * sqr_sum) ** beta
+// If needed, the PNG-encoded image is transformed to match the requested number
+// of color channels.
//
-// For details, see [Krizhevsky et al., ImageNet classification with deep
-// convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks).
+// This op also supports decoding JPEGs and non-animated GIFs since the interface
+// is the same, though it is cleaner to use `tf.image.decode_image`.
//
// Arguments:
-// input: 4-D.
-func LRN(scope *Scope, input tf.Output, optional ...LRNAttr) (output tf.Output) {
+// contents: 0-D. The PNG-encoded image.
+//
+// Returns 3-D with shape `[height, width, channels]`.
+func DecodePng(scope *Scope, contents tf.Output, optional ...DecodePngAttr) (image tf.Output) {
if scope.Err() != nil {
return
}
@@ -18681,9 +19014,9 @@ func LRN(scope *Scope, input tf.Output, optional ...LRNAttr) (output tf.Output)
a(attrs)
}
opspec := tf.OpSpec{
- Type: "LRN",
+ Type: "DecodePng",
Input: []tf.Input{
- input,
+ contents,
},
Attrs: attrs,
}
@@ -18691,68 +19024,44 @@ func LRN(scope *Scope, input tf.Output, optional ...LRNAttr) (output tf.Output)
return op.Output(0)
}
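
An illustrative sketch pairing the two attrs, e.g. for 16-bit grayscale depth maps (same imports as above):

```go
// decodePng16 decodes a PNG into a single-channel uint16 tensor.
func decodePng16(s *op.Scope, contents tf.Output) tf.Output {
	return op.DecodePng(s, contents,
		op.DecodePngChannels(1),
		op.DecodePngDtype(tf.Uint16))
}
```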
-// Inverse fast Fourier transform.
-//
-// Computes the inverse 1-dimensional discrete Fourier transform over the
-// inner-most dimension of `input`.
+// Decode the first frame of a GIF-encoded image to a uint8 tensor.
//
-// Arguments:
-// input: A complex64 tensor.
+// GIFs with frame or transparency compression are not supported;
+// convert animated GIFs from compressed to uncompressed with:
//
-// Returns A complex64 tensor of the same shape as `input`. The inner-most
-// dimension of `input` is replaced with its inverse 1D Fourier transform.
+// convert $src.gif -coalesce $dst.gif
//
-// @compatibility(numpy)
-// Equivalent to np.fft.ifft
-// @end_compatibility
-func IFFT(scope *Scope, input tf.Output) (output tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "IFFT",
- Input: []tf.Input{
- input,
- },
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// Creates a dataset that batches `batch_size` elements from `input_dataset`.
+// This op also supports decoding JPEGs and PNGs, though it is cleaner to use
+// `tf.image.decode_image`.
//
// Arguments:
+// contents: 0-D. The GIF-encoded image.
//
-// batch_size: A scalar representing the number of elements to accumulate in a
-// batch.
-//
-//
-func BatchDataset(scope *Scope, input_dataset tf.Output, batch_size tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
+// Returns 4-D with shape `[num_frames, height, width, 3]`. RGB channel order.
+func DecodeGif(scope *Scope, contents tf.Output) (image tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
opspec := tf.OpSpec{
- Type: "BatchDataset",
+ Type: "DecodeGif",
Input: []tf.Input{
- input_dataset, batch_size,
+ contents,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
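
An end-to-end sketch of running this op against raw GIF bytes (the function name is an illustrative assumption):

```go
import (
	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

// decodeGifFrames feeds GIF bytes through a one-op graph and returns
// the [num_frames, height, width, 3] uint8 tensor.
func decodeGifFrames(gifBytes []byte) (*tf.Tensor, error) {
	s := op.NewScope()
	contents := op.Placeholder(s, tf.String)
	frames := op.DecodeGif(s, contents)
	graph, err := s.Finalize()
	if err != nil {
		return nil, err
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		return nil, err
	}
	defer sess.Close()
	in, err := tf.NewTensor(string(gifBytes))
	if err != nil {
		return nil, err
	}
	out, err := sess.Run(
		map[tf.Output]*tf.Tensor{contents: in},
		[]tf.Output{frames}, nil)
	if err != nil {
		return nil, err
	}
	return out[0], nil
}
```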
-// ResourceSparseApplyCenteredRMSPropAttr is an optional argument to ResourceSparseApplyCenteredRMSProp.
-type ResourceSparseApplyCenteredRMSPropAttr func(optionalAttr)
+// ResourceApplyCenteredRMSPropAttr is an optional argument to ResourceApplyCenteredRMSProp.
+type ResourceApplyCenteredRMSPropAttr func(optionalAttr)
-// ResourceSparseApplyCenteredRMSPropUseLocking sets the optional use_locking attribute to value.
+// ResourceApplyCenteredRMSPropUseLocking sets the optional use_locking attribute to value.
//
// value: If `True`, updating of the var, mg, ms, and mom tensors is
// protected by a lock; otherwise the behavior is undefined, but may exhibit less
// contention.
// If not specified, defaults to false
-func ResourceSparseApplyCenteredRMSPropUseLocking(value bool) ResourceSparseApplyCenteredRMSPropAttr {
+func ResourceApplyCenteredRMSPropUseLocking(value bool) ResourceApplyCenteredRMSPropAttr {
return func(m optionalAttr) {
m["use_locking"] = value
}
@@ -18771,10 +19080,12 @@ func ResourceSparseApplyCenteredRMSPropUseLocking(value bool) ResourceSparseAppl
//
// mean_square = decay * mean_square + (1-decay) * gradient ** 2
// mean_grad = decay * mean_grad + (1-decay) * gradient
+//
// Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
//
+// mg <- rho * mg_{t-1} + (1-rho) * grad
// ms <- rho * ms_{t-1} + (1-rho) * grad * grad
-// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
+// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)
// var <- var - mom
//
// Arguments:
@@ -18787,10 +19098,9 @@ func ResourceSparseApplyCenteredRMSPropUseLocking(value bool) ResourceSparseAppl
//
// epsilon: Ridge term. Must be a scalar.
// grad: The gradient.
-// indices: A vector of indices into the first dimension of var, ms and mom.
//
// Returns the created operation.
-func ResourceSparseApplyCenteredRMSProp(scope *Scope, var_ tf.Output, mg tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyCenteredRMSPropAttr) (o *tf.Operation) {
+func ResourceApplyCenteredRMSProp(scope *Scope, var_ tf.Output, mg tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyCenteredRMSPropAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
@@ -18799,131 +19109,225 @@ func ResourceSparseApplyCenteredRMSProp(scope *Scope, var_ tf.Output, mg tf.Outp
a(attrs)
}
opspec := tf.OpSpec{
- Type: "ResourceSparseApplyCenteredRMSProp",
+ Type: "ResourceApplyCenteredRMSProp",
Input: []tf.Input{
- var_, mg, ms, mom, lr, rho, momentum, epsilon, grad, indices,
+ var_, mg, ms, mom, lr, rho, momentum, epsilon, grad,
},
Attrs: attrs,
}
return scope.AddOperation(opspec)
}
-// Flips all bits elementwise.
+// Returns a list of tensors with the same shapes and contents as the input
//
-// The result will have exactly those bits set, that are not set in `x`. The
-// computation is performed on the underlying representation of x.
-func Invert(scope *Scope, x tf.Output) (y tf.Output) {
+// tensors.
+//
+// This op can be used to override the gradient for complicated functions. For
+// example, suppose y = f(x) and we wish to apply a custom function g for backprop
+// such that dx = g(dy). In Python,
+//
+// ```python
+// with tf.get_default_graph().gradient_override_map(
+// {'IdentityN': 'OverrideGradientWithG'}):
+// y, _ = identity_n([f(x), x])
+//
+// @tf.RegisterGradient('OverrideGradientWithG')
+// def ApplyG(op, dy, _):
+// return [None, g(dy)] # Do not backprop to f(x).
+// ```
+func IdentityN(scope *Scope, input []tf.Output) (output []tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Invert",
+ Type: "IdentityN",
Input: []tf.Input{
- x,
+ tf.OutputList(input),
},
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ if scope.Err() != nil {
+ return
+ }
+ var idx int
+ var err error
+ if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
+ scope.UpdateErr("IdentityN", err)
+ return
+ }
+ return output
}
-// Computes the mean along segments of a tensor.
-//
-// Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
-// segments.
+// Computes the gradient of the sigmoid of `x` wrt its input.
//
-// Computes a tensor such that
-// \\(output_i = \frac{\sum_j data_j}{N}\\) where `mean` is
-// over `j` such that `segment_ids[j] == i` and `N` is the total number of
-// values summed.
+// Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and
+// `dy` is the corresponding input gradient.
+func SigmoidGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "SigmoidGrad",
+ Input: []tf.Input{
+ y, dy,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
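
A small sketch showing how this kernel pairs with the forward op (same imports as above; the helper name is an illustrative assumption):

```go
// sigmoidBackprop computes dy * y * (1 - y) by first evaluating
// y = sigmoid(x) and then applying the gradient kernel.
func sigmoidBackprop(s *op.Scope, x, dy tf.Output) tf.Output {
	y := op.Sigmoid(s, x)
	return op.SigmoidGrad(s, y, dy)
}
```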
+
+// Convert one or more images from HSV to RGB.
//
-// If the mean is empty for a given segment ID `i`, `output[i] = 0`.
+// Outputs a tensor of the same shape as the `images` tensor, containing the RGB
+// value of the pixels. The output is only well defined if the values in `images`
+// are in `[0,1]`.
//
-// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
-// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentMean.png" alt>
-// </div>
+// See `rgb_to_hsv` for a description of the HSV encoding.
//
// Arguments:
+// images: 1-D or higher rank. HSV data to convert. Last dimension must be size 3.
//
-// segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s
-// first dimension. Values should be sorted and can be repeated.
-//
-// Returns Has same shape as data, except for dimension 0 which
-// has size `k`, the number of segments.
-func SegmentMean(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output) {
+// Returns `images` converted to RGB.
+func HSVToRGB(scope *Scope, images tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "SegmentMean",
+ Type: "HSVToRGB",
Input: []tf.Input{
- data, segment_ids,
+ images,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// CumprodAttr is an optional argument to Cumprod.
-type CumprodAttr func(optionalAttr)
+// SampleDistortedBoundingBoxV2Attr is an optional argument to SampleDistortedBoundingBoxV2.
+type SampleDistortedBoundingBoxV2Attr func(optionalAttr)
-// CumprodExclusive sets the optional exclusive attribute to value.
+// SampleDistortedBoundingBoxV2Seed sets the optional seed attribute to value.
//
-// value: If `True`, perform exclusive cumprod.
-// If not specified, defaults to false
-func CumprodExclusive(value bool) CumprodAttr {
+// value: If either `seed` or `seed2` are set to non-zero, the random number
+// generator is seeded by the given `seed`. Otherwise, it is seeded by a random
+// seed.
+// If not specified, defaults to 0
+func SampleDistortedBoundingBoxV2Seed(value int64) SampleDistortedBoundingBoxV2Attr {
return func(m optionalAttr) {
- m["exclusive"] = value
+ m["seed"] = value
}
}
-// CumprodReverse sets the optional reverse attribute to value.
+// SampleDistortedBoundingBoxV2Seed2 sets the optional seed2 attribute to value.
//
-// value: A `bool` (default: False).
-// If not specified, defaults to false
-func CumprodReverse(value bool) CumprodAttr {
+// value: A second seed to avoid seed collision.
+// If not specified, defaults to 0
+func SampleDistortedBoundingBoxV2Seed2(value int64) SampleDistortedBoundingBoxV2Attr {
return func(m optionalAttr) {
- m["reverse"] = value
+ m["seed2"] = value
}
}
-// Compute the cumulative product of the tensor `x` along `axis`.
+// SampleDistortedBoundingBoxV2AspectRatioRange sets the optional aspect_ratio_range attribute to value.
//
-// By default, this op performs an inclusive cumprod, which means that the first
-// element of the input is identical to the first element of the output:
+// value: The cropped area of the image must have an aspect ratio =
+// width / height within this range.
+// If not specified, defaults to <f:0.75 f:1.33 >
+func SampleDistortedBoundingBoxV2AspectRatioRange(value []float32) SampleDistortedBoundingBoxV2Attr {
+ return func(m optionalAttr) {
+ m["aspect_ratio_range"] = value
+ }
+}
+
+// SampleDistortedBoundingBoxV2AreaRange sets the optional area_range attribute to value.
//
-// ```python
-// tf.cumprod([a, b, c]) # => [a, a * b, a * b * c]
-// ```
+// value: The cropped area of the image must contain a fraction of the
+// supplied image within this range.
+// If not specified, defaults to <f:0.05 f:1 >
+func SampleDistortedBoundingBoxV2AreaRange(value []float32) SampleDistortedBoundingBoxV2Attr {
+ return func(m optionalAttr) {
+ m["area_range"] = value
+ }
+}
+
+// SampleDistortedBoundingBoxV2MaxAttempts sets the optional max_attempts attribute to value.
//
-// By setting the `exclusive` kwarg to `True`, an exclusive cumprod is
-// performed instead:
+// value: Number of attempts at generating a cropped region of the image
+// that satisfies the specified constraints. After `max_attempts` failures, return the entire
+// image.
+// If not specified, defaults to 100
+func SampleDistortedBoundingBoxV2MaxAttempts(value int64) SampleDistortedBoundingBoxV2Attr {
+ return func(m optionalAttr) {
+ m["max_attempts"] = value
+ }
+}
+
+// SampleDistortedBoundingBoxV2UseImageIfNoBoundingBoxes sets the optional use_image_if_no_bounding_boxes attribute to value.
//
-// ```python
-// tf.cumprod([a, b, c], exclusive=True) # => [1, a, a * b]
-// ```
+// value: Controls behavior if no bounding boxes supplied.
+// If true, assume an implicit bounding box covering the whole input. If false,
+// raise an error.
+// If not specified, defaults to false
+func SampleDistortedBoundingBoxV2UseImageIfNoBoundingBoxes(value bool) SampleDistortedBoundingBoxV2Attr {
+ return func(m optionalAttr) {
+ m["use_image_if_no_bounding_boxes"] = value
+ }
+}
+
+// Generate a single randomly distorted bounding box for an image.
//
-// By setting the `reverse` kwarg to `True`, the cumprod is performed in the
-// opposite direction:
+// Bounding box annotations are often supplied in addition to ground-truth labels
+// in image recognition or object localization tasks. A common technique for
+// training such a system is to randomly distort an image while preserving
+// its content, i.e. *data augmentation*. This Op outputs a randomly distorted
+// localization of an object, i.e. bounding box, given an `image_size`,
+// `bounding_boxes` and a series of constraints.
//
-// ```python
-// tf.cumprod([a, b, c], reverse=True) # => [a * b * c, b * c, c]
-// ```
+// The output of this Op is a single bounding box that may be used to crop the
+// original image. The output is returned as 3 tensors: `begin`, `size` and
+// `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the
+// image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize
+// what the bounding box looks like.
//
-// This is more efficient than using separate `tf.reverse` ops.
+// Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The
+// bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
+// height of the underlying image.
//
-// The `reverse` and `exclusive` kwargs can also be combined:
+// For example,
//
// ```python
-// tf.cumprod([a, b, c], exclusive=True, reverse=True) # => [b * c, c, 1]
+// # Generate a single distorted bounding box.
+// begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
+// tf.shape(image),
+// bounding_boxes=bounding_boxes)
+//
+// # Draw the bounding box in an image summary.
+// image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
+// bbox_for_draw)
+// tf.summary.image('images_with_box', image_with_box)
+//
+// # Employ the bounding box to distort the image.
+// distorted_image = tf.slice(image, begin, size)
// ```
//
+// Note that if no bounding box information is available, setting
+// `use_image_if_no_bounding_boxes = true` will assume there is a single implicit
+// bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is
+// false and no bounding boxes are supplied, an error is raised.
+//
// Arguments:
-// x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
-// `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
-// `complex128`, `qint8`, `quint8`, `qint32`, `half`.
-// axis: A `Tensor` of type `int32` (default: 0). Must be in the range
-// `[-rank(x), rank(x))`.
-func Cumprod(scope *Scope, x tf.Output, axis tf.Output, optional ...CumprodAttr) (out tf.Output) {
+// image_size: 1-D, containing `[height, width, channels]`.
+// bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes
+// associated with the image.
+// min_object_covered: The cropped area of the image must contain at least this
+// fraction of any bounding box supplied. The value of this parameter should be
+// non-negative. In the case of 0, the cropped area does not need to overlap
+// any of the bounding boxes supplied.
+//
+// Returns:
+//   begin: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to `tf.slice`.
+//   size: 1-D, containing `[target_height, target_width, -1]`. Provide as input to `tf.slice`.
+//   bboxes: 3-D with shape `[1, 1, 4]` containing the distorted bounding box. Provide as input to `tf.image.draw_bounding_boxes`.
+func SampleDistortedBoundingBoxV2(scope *Scope, image_size tf.Output, bounding_boxes tf.Output, min_object_covered tf.Output, optional ...SampleDistortedBoundingBoxV2Attr) (begin tf.Output, size tf.Output, bboxes tf.Output) {
if scope.Err() != nil {
return
}
@@ -18932,40 +19336,88 @@ func Cumprod(scope *Scope, x tf.Output, axis tf.Output, optional ...CumprodAttr)
a(attrs)
}
opspec := tf.OpSpec{
- Type: "Cumprod",
+ Type: "SampleDistortedBoundingBoxV2",
Input: []tf.Input{
- x, axis,
+ image_size, bounding_boxes, min_object_covered,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1), op.Output(2)
}
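
A Go counterpart to the Python snippet above (the helper name and the 0.1 coverage value are illustrative assumptions; same imports as above):

```go
// randomCrop draws one distorted box and crops the image to it.
func randomCrop(s *op.Scope, image, boxes tf.Output) tf.Output {
	begin, size, _ := op.SampleDistortedBoundingBoxV2(s,
		op.Shape(s, image), boxes,
		op.Const(s, float32(0.1)), // min_object_covered
		op.SampleDistortedBoundingBoxV2MaxAttempts(50))
	return op.Slice(s, image, begin, size)
}
```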
-// DestroyResourceOpAttr is an optional argument to DestroyResourceOp.
-type DestroyResourceOpAttr func(optionalAttr)
+// ExtractGlimpseAttr is an optional argument to ExtractGlimpse.
+type ExtractGlimpseAttr func(optionalAttr)
-// DestroyResourceOpIgnoreLookupError sets the optional ignore_lookup_error attribute to value.
+// ExtractGlimpseCentered sets the optional centered attribute to value.
//
-// value: whether to ignore the error when the resource
-// doesn't exist.
+// value: indicates if the offset coordinates are centered relative to
+// the image, in which case the (0, 0) offset is relative to the center
+// of the input images. If false, the (0, 0) offset corresponds to the
+// upper left corner of the input images.
// If not specified, defaults to true
-func DestroyResourceOpIgnoreLookupError(value bool) DestroyResourceOpAttr {
+func ExtractGlimpseCentered(value bool) ExtractGlimpseAttr {
return func(m optionalAttr) {
- m["ignore_lookup_error"] = value
+ m["centered"] = value
}
}
-// Deletes the resource specified by the handle.
+// ExtractGlimpseNormalized sets the optional normalized attribute to value.
//
-// All subsequent operations using the resource will result in a NotFound
-// error status.
+// value: indicates if the offset coordinates are normalized.
+// If not specified, defaults to true
+func ExtractGlimpseNormalized(value bool) ExtractGlimpseAttr {
+ return func(m optionalAttr) {
+ m["normalized"] = value
+ }
+}
+
+// ExtractGlimpseUniformNoise sets the optional uniform_noise attribute to value.
+//
+// value: indicates if the noise should be generated using a
+// uniform distribution or a Gaussian distribution.
+// If not specified, defaults to true
+func ExtractGlimpseUniformNoise(value bool) ExtractGlimpseAttr {
+ return func(m optionalAttr) {
+ m["uniform_noise"] = value
+ }
+}
+
+// Extracts a glimpse from the input tensor.
+//
+// Returns a set of windows called glimpses extracted at location
+// `offsets` from the input tensor. If a window only partially
+// overlaps the input, the non-overlapping areas are filled with
+// random noise.
+//
+// The result is a 4-D tensor of shape `[batch_size, glimpse_height,
+// glimpse_width, channels]`. The channels and batch dimensions are the
+// same as that of the input tensor. The height and width of the output
+// windows are specified in the `size` parameter.
+//
+// The arguments `normalized` and `centered` control how the windows are built:
+//
+// * If the coordinates are normalized but not centered, 0.0 and 1.0
+// correspond to the minimum and maximum of each height and width
+// dimension.
+// * If the coordinates are both normalized and centered, they range from
+// -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper
+// left corner, the lower right corner is located at (1.0, 1.0) and the
+// center is at (0, 0).
+// * If the coordinates are not normalized they are interpreted as
+// numbers of pixels.
//
// Arguments:
-// resource: handle to the resource to delete.
+// input: A 4-D float tensor of shape `[batch_size, height, width, channels]`.
+// size: A 1-D tensor of 2 elements containing the size of the glimpses
+// to extract. The glimpse height must be specified first, followed
+// by the glimpse width.
+// offsets: A 2-D integer tensor of shape `[batch_size, 2]` containing
+// the y, x locations of the center of each window.
//
-// Returns the created operation.
-func DestroyResourceOp(scope *Scope, resource tf.Output, optional ...DestroyResourceOpAttr) (o *tf.Operation) {
+// Returns A tensor representing the glimpses `[batch_size,
+// glimpse_height, glimpse_width, channels]`.
+func ExtractGlimpse(scope *Scope, input tf.Output, size tf.Output, offsets tf.Output, optional ...ExtractGlimpseAttr) (glimpse tf.Output) {
if scope.Err() != nil {
return
}
@@ -18974,221 +19426,222 @@ func DestroyResourceOp(scope *Scope, resource tf.Output, optional ...DestroyReso
a(attrs)
}
opspec := tf.OpSpec{
- Type: "DestroyResourceOp",
+ Type: "ExtractGlimpse",
Input: []tf.Input{
- resource,
+ input, size, offsets,
},
Attrs: attrs,
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
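
An illustrative sketch using centered, normalized offsets as described in the list above (same imports; the helper name and 64x64 size are assumptions):

```go
// glimpse64 extracts a 64x64 window around each (y, x) offset in
// `offsets`, one per batch element.
func glimpse64(s *op.Scope, input, offsets tf.Output) tf.Output {
	size := op.Const(s, []int32{64, 64}) // [glimpse_height, glimpse_width]
	return op.ExtractGlimpse(s, input, size, offsets,
		op.ExtractGlimpseCentered(true),
		op.ExtractGlimpseNormalized(true))
}
```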
-// Converts each string in the input Tensor to its hash mod by a number of buckets.
-//
-// The hash function is deterministic on the content of the string within the
-// process. The hash function is a keyed hash function, where attribute `key`
-// defines the key of the hash function. `key` is an array of 2 elements.
-//
-// A strong hash is important when inputs may be malicious, e.g. URLs with
-// additional components. Adversaries could try to make their inputs hash to the
-// same bucket for a denial-of-service attack or to skew the results. A strong
-// hash prevents this by making it difficult, if not infeasible, to compute inputs
-// that hash to the same bucket. This comes at a cost of roughly 4x higher compute
-// time than `tf.string_to_hash_bucket_fast`.
-//
-// Arguments:
-// input: The strings to assign a hash bucket.
-// num_buckets: The number of buckets.
-// key: The key for the keyed hash function passed as a list of two uint64
-// elements.
+// A container for an iterator resource.
//
-// Returns A Tensor of the same shape as the input `string_tensor`.
-func StringToHashBucketStrong(scope *Scope, input tf.Output, num_buckets int64, key []int64) (output tf.Output) {
+// Returns A handle to the iterator that can be passed to a "MakeIterator"
+// or "IteratorGetNext" op.
+func Iterator(scope *Scope, shared_name string, container string, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"num_buckets": num_buckets, "key": key}
+ attrs := map[string]interface{}{"shared_name": shared_name, "container": container, "output_types": output_types, "output_shapes": output_shapes}
opspec := tf.OpSpec{
- Type: "StringToHashBucketStrong",
- Input: []tf.Input{
- input,
- },
+ Type: "Iterator",
+
Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Encode audio data using the WAV file format.
-//
-// This operation will generate a string suitable to be saved out to create a .wav
-// audio file. It will be encoded in the 16-bit PCM format. It takes in float
-// values in the range -1.0f to 1.0f, and any outside that value will be clamped to
-// that range.
+// ShuffleDatasetAttr is an optional argument to ShuffleDataset.
+type ShuffleDatasetAttr func(optionalAttr)
+
+// ShuffleDatasetReshuffleEachIteration sets the optional reshuffle_each_iteration attribute to value.
//
-// `audio` is a 2-D float Tensor of shape `[length, channels]`.
-// `sample_rate` is a scalar Tensor holding the rate to use (e.g. 44100).
+// value: If true, each iterator over this dataset will be given
+// a different pseudorandomly generated seed, based on a sequence seeded by the
+// `seed` and `seed2` inputs. If false, each iterator will be given the same
+// seed, and repeated iteration over this dataset will yield the exact same
+// sequence of results.
+// If not specified, defaults to true
+func ShuffleDatasetReshuffleEachIteration(value bool) ShuffleDatasetAttr {
+ return func(m optionalAttr) {
+ m["reshuffle_each_iteration"] = value
+ }
+}
+
+// Creates a dataset that shuffles elements from `input_dataset` pseudorandomly.
//
// Arguments:
-// audio: 2-D with shape `[length, channels]`.
-// sample_rate: Scalar containing the sample frequency.
//
-// Returns 0-D. WAV-encoded file contents.
-func EncodeWav(scope *Scope, audio tf.Output, sample_rate tf.Output) (contents tf.Output) {
+// buffer_size: The number of output elements to buffer in an iterator over
+// this dataset. Compare with the `min_after_dequeue` attr when creating a
+// `RandomShuffleQueue`.
+// seed: A scalar seed for the random number generator. If either `seed` or
+// `seed2` is set to be non-zero, the random number generator is seeded
+// by the given seed. Otherwise, a random seed is used.
+// seed2: A second scalar seed to avoid seed collision.
+//
+//
+func ShuffleDataset(scope *Scope, input_dataset tf.Output, buffer_size tf.Output, seed tf.Output, seed2 tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ShuffleDatasetAttr) (handle tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "EncodeWav",
+ Type: "ShuffleDataset",
Input: []tf.Input{
- audio, sample_rate,
+ input_dataset, buffer_size, seed, seed2,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// The gradient operator for the SparseAdd op.
+// 3D fast Fourier transform.
//
-// The SparseAdd op calculates A + B, where A, B, and the sum are all represented
-// as `SparseTensor` objects. This op takes in the upstream gradient w.r.t.
-// non-empty values of the sum, and outputs the gradients w.r.t. the non-empty
-// values of A and B.
+// Computes the 3-dimensional discrete Fourier transform over the inner-most 3
+// dimensions of `input`.
//
// Arguments:
-// backprop_val_grad: 1-D with shape `[nnz(sum)]`. The gradient with respect to
-// the non-empty values of the sum.
-// a_indices: 2-D. The `indices` of the `SparseTensor` A, size `[nnz(A), ndims]`.
-// b_indices: 2-D. The `indices` of the `SparseTensor` B, size `[nnz(B), ndims]`.
-// sum_indices: 2-D. The `indices` of the sum `SparseTensor`, size
-// `[nnz(sum), ndims]`.
+// input: A complex64 tensor.
//
-// Returns 1-D with shape `[nnz(A)]`. The gradient with respect to the
-// non-empty values of A.1-D with shape `[nnz(B)]`. The gradient with respect to the
-// non-empty values of B.
-func SparseAddGrad(scope *Scope, backprop_val_grad tf.Output, a_indices tf.Output, b_indices tf.Output, sum_indices tf.Output) (a_val_grad tf.Output, b_val_grad tf.Output) {
+// Returns A complex64 tensor of the same shape as `input`. The inner-most 3
+// dimensions of `input` are replaced with their 3D Fourier transform.
+//
+// @compatibility(numpy)
+// Equivalent to np.fft.fftn with 3 dimensions.
+// @end_compatibility
+func FFT3D(scope *Scope, input tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "SparseAddGrad",
+ Type: "FFT3D",
Input: []tf.Input{
- backprop_val_grad, a_indices, b_indices, sum_indices,
+ input,
},
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1)
+ return op.Output(0)
}
-// Adds `bias` to `value`.
-//
-// This is a deprecated version of BiasAdd and will be soon removed.
+// CropAndResizeGradBoxesAttr is an optional argument to CropAndResizeGradBoxes.
+type CropAndResizeGradBoxesAttr func(optionalAttr)
+
+// CropAndResizeGradBoxesMethod sets the optional method attribute to value.
//
-// This is a special case of `tf.add` where `bias` is restricted to be 1-D.
-// Broadcasting is supported, so `value` may have any number of dimensions.
+// value: A string specifying the interpolation method. Only 'bilinear' is
+// supported for now.
+// If not specified, defaults to "bilinear"
+func CropAndResizeGradBoxesMethod(value string) CropAndResizeGradBoxesAttr {
+ return func(m optionalAttr) {
+ m["method"] = value
+ }
+}
+
+// Computes the gradient of the crop_and_resize op wrt the input boxes tensor.
//
// Arguments:
-// value: Any number of dimensions.
-// bias: 1-D with size the last dimension of `value`.
+// grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
+// image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
+// Both `image_height` and `image_width` need to be positive.
+// boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
+// specifies the coordinates of a box in the `box_ind[i]` image and is specified
+// in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
+// `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the
+// `[0, 1]` interval of normalized image height is mapped to
+// `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in
+// which case the sampled crop is an up-down flipped version of the original
+// image. The width dimension is treated similarly. Normalized coordinates
+// outside the `[0, 1]` range are allowed, in which case we use
+// `extrapolation_value` to extrapolate the input image values.
+// box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
+// The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
//
-// Returns Broadcasted sum of `value` and `bias`.
-func BiasAddV1(scope *Scope, value tf.Output, bias tf.Output) (output tf.Output) {
+// Returns A 2-D tensor of shape `[num_boxes, 4]`.
+func CropAndResizeGradBoxes(scope *Scope, grads tf.Output, image tf.Output, boxes tf.Output, box_ind tf.Output, optional ...CropAndResizeGradBoxesAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "BiasAddV1",
+ Type: "CropAndResizeGradBoxes",
Input: []tf.Input{
- value, bias,
+ grads, image, boxes, box_ind,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// FixedLengthRecordReaderV2Attr is an optional argument to FixedLengthRecordReaderV2.
-type FixedLengthRecordReaderV2Attr func(optionalAttr)
-
-// FixedLengthRecordReaderV2HeaderBytes sets the optional header_bytes attribute to value.
+// Saves tensors in V2 checkpoint format.
//
-// value: Number of bytes in the header, defaults to 0.
-// If not specified, defaults to 0
-func FixedLengthRecordReaderV2HeaderBytes(value int64) FixedLengthRecordReaderV2Attr {
- return func(m optionalAttr) {
- m["header_bytes"] = value
- }
-}
-
-// FixedLengthRecordReaderV2FooterBytes sets the optional footer_bytes attribute to value.
+// By default, saves the named tensors in full. If the caller wishes to save
+// specific slices of full tensors, "shape_and_slices" should be non-empty strings
+// and correspondingly well-formed.
//
-// value: Number of bytes in the footer, defaults to 0.
-// If not specified, defaults to 0
-func FixedLengthRecordReaderV2FooterBytes(value int64) FixedLengthRecordReaderV2Attr {
- return func(m optionalAttr) {
- m["footer_bytes"] = value
- }
-}
-
-// FixedLengthRecordReaderV2HopBytes sets the optional hop_bytes attribute to value.
+// Arguments:
+// prefix: Must have a single element. The prefix of the V2 checkpoint to which we
+// write the tensors.
+// tensor_names: shape {N}. The names of the tensors to be saved.
+// shape_and_slices: shape {N}. The slice specs of the tensors to be saved.
+// Empty strings indicate that they are non-partitioned tensors.
+// tensors: `N` tensors to save.
//
-// value: Number of bytes to hop before each read. Default of 0 means using
-// record_bytes.
-// If not specified, defaults to 0
-func FixedLengthRecordReaderV2HopBytes(value int64) FixedLengthRecordReaderV2Attr {
- return func(m optionalAttr) {
- m["hop_bytes"] = value
+// Returns the created operation.
+func SaveV2(scope *Scope, prefix tf.Output, tensor_names tf.Output, shape_and_slices tf.Output, tensors []tf.Output) (o *tf.Operation) {
+ if scope.Err() != nil {
+ return
}
+ opspec := tf.OpSpec{
+ Type: "SaveV2",
+ Input: []tf.Input{
+ prefix, tensor_names, shape_and_slices, tf.OutputList(tensors),
+ },
+ }
+ return scope.AddOperation(opspec)
}
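
A sketch of saving unpartitioned tensors (the helper name is an illustrative assumption; empty slice specs mean "save in full" as documented above):

```go
// saveAll writes `tensors` under `names` to a V2 checkpoint at `prefix`.
func saveAll(s *op.Scope, prefix tf.Output, names []string, tensors []tf.Output) *tf.Operation {
	empty := make([]string, len(names)) // "" => non-partitioned tensor
	return op.SaveV2(s, prefix,
		op.Const(s, names), op.Const(s, empty), tensors)
}
```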
-// FixedLengthRecordReaderV2Container sets the optional container attribute to value.
-//
-// value: If non-empty, this reader is placed in the given container.
-// Otherwise, a default container is used.
+// StatsAggregatorHandleAttr is an optional argument to StatsAggregatorHandle.
+type StatsAggregatorHandleAttr func(optionalAttr)
+
+// StatsAggregatorHandleContainer sets the optional container attribute to value.
// If not specified, defaults to ""
-func FixedLengthRecordReaderV2Container(value string) FixedLengthRecordReaderV2Attr {
+func StatsAggregatorHandleContainer(value string) StatsAggregatorHandleAttr {
return func(m optionalAttr) {
m["container"] = value
}
}
-// FixedLengthRecordReaderV2SharedName sets the optional shared_name attribute to value.
-//
-// value: If non-empty, this reader is named in the given bucket
-// with this shared_name. Otherwise, the node name is used instead.
+// StatsAggregatorHandleSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
-func FixedLengthRecordReaderV2SharedName(value string) FixedLengthRecordReaderV2Attr {
+func StatsAggregatorHandleSharedName(value string) StatsAggregatorHandleAttr {
return func(m optionalAttr) {
m["shared_name"] = value
}
}
-// FixedLengthRecordReaderV2Encoding sets the optional encoding attribute to value.
-//
-// value: The type of encoding for the file. Currently ZLIB and GZIP
-// are supported. Defaults to none.
-// If not specified, defaults to ""
-func FixedLengthRecordReaderV2Encoding(value string) FixedLengthRecordReaderV2Attr {
- return func(m optionalAttr) {
- m["encoding"] = value
- }
-}
-
-// A Reader that outputs fixed-length records from a file.
-//
-// Arguments:
-// record_bytes: Number of bytes in the record.
-//
-// Returns The handle to reference the Reader.
-func FixedLengthRecordReaderV2(scope *Scope, record_bytes int64, optional ...FixedLengthRecordReaderV2Attr) (reader_handle tf.Output) {
+// Creates a statistics manager resource.
+func StatsAggregatorHandle(scope *Scope, optional ...StatsAggregatorHandleAttr) (handle tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"record_bytes": record_bytes}
+ attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "FixedLengthRecordReaderV2",
+ Type: "StatsAggregatorHandle",
Attrs: attrs,
}
@@ -19196,196 +19649,225 @@ func FixedLengthRecordReaderV2(scope *Scope, record_bytes int64, optional ...Fix
return op.Output(0)
}
-// QuantizedRelu6Attr is an optional argument to QuantizedRelu6.
-type QuantizedRelu6Attr func(optionalAttr)
-
-// QuantizedRelu6OutType sets the optional out_type attribute to value.
-// If not specified, defaults to DT_QUINT8
-func QuantizedRelu6OutType(value tf.DataType) QuantizedRelu6Attr {
- return func(m optionalAttr) {
- m["out_type"] = value
- }
-}
-
-// Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)`
+// Greedily selects a subset of bounding boxes in descending order of score,
//
-// Arguments:
+// pruning away boxes that have high intersection-over-union (IOU) overlap
+// with previously selected boxes. Bounding boxes are supplied as
+// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
+// diagonal pair of box corners and the coordinates can be provided as normalized
+// (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm
+// is agnostic to where the origin is in the coordinate system. Note that this
+// algorithm is invariant to orthogonal transformations and translations
+// of the coordinate system; thus translations or reflections of the coordinate
+// system result in the same boxes being selected by the algorithm.
//
-// min_features: The float value that the lowest quantized value represents.
-// max_features: The float value that the highest quantized value represents.
+// The output of this operation is a set of integers indexing into the input
+// collection of bounding boxes representing the selected boxes. The bounding
+// box coordinates corresponding to the selected indices can then be obtained
+// using the `tf.gather` operation. For example:
//
-// Returns Has the same output shape as "features".The float value that the lowest quantized value represents.The float value that the highest quantized value represents.
-func QuantizedRelu6(scope *Scope, features tf.Output, min_features tf.Output, max_features tf.Output, optional ...QuantizedRelu6Attr) (activations tf.Output, min_activations tf.Output, max_activations tf.Output) {
+// selected_indices = tf.image.non_max_suppression_v2(
+// boxes, scores, max_output_size, iou_threshold)
+// selected_boxes = tf.gather(boxes, selected_indices)
+//
+// Arguments:
+// boxes: A 2-D float tensor of shape `[num_boxes, 4]`.
+// scores: A 1-D float tensor of shape `[num_boxes]` representing a single
+// score corresponding to each box (each row of boxes).
+// max_output_size: A scalar integer tensor representing the maximum number of
+// boxes to be selected by non max suppression.
+// iou_threshold: A 0-D float tensor representing the threshold for deciding whether
+// boxes overlap too much with respect to IOU.
+//
+// Returns A 1-D integer tensor of shape `[M]` representing the selected
+// indices from the boxes tensor, where `M <= max_output_size`.
+func NonMaxSuppressionV2(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, iou_threshold tf.Output) (selected_indices tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "QuantizedRelu6",
+ Type: "NonMaxSuppressionV2",
Input: []tf.Input{
- features, min_features, max_features,
+ boxes, scores, max_output_size, iou_threshold,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
-}
-
-// CumsumAttr is an optional argument to Cumsum.
-type CumsumAttr func(optionalAttr)
-
-// CumsumExclusive sets the optional exclusive attribute to value.
-//
-// value: If `True`, perform exclusive cumsum.
-// If not specified, defaults to false
-func CumsumExclusive(value bool) CumsumAttr {
- return func(m optionalAttr) {
- m["exclusive"] = value
- }
+ return op.Output(0)
}
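
A Go rendering of the gather pattern shown above (the thresholds and helper name are illustrative assumptions; same imports as above):

```go
// nms keeps at most 100 boxes whose pairwise IOU stays below 0.5 and
// gathers the surviving rows out of `boxes`.
func nms(s *op.Scope, boxes, scores tf.Output) tf.Output {
	idx := op.NonMaxSuppressionV2(s, boxes, scores,
		op.Const(s, int32(100)),   // max_output_size
		op.Const(s, float32(0.5))) // iou_threshold
	return op.Gather(s, boxes, idx)
}
```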
-// CumsumReverse sets the optional reverse attribute to value.
+// Reshapes a tensor.
//
-// value: A `bool` (default: False).
-// If not specified, defaults to false
-func CumsumReverse(value bool) CumsumAttr {
- return func(m optionalAttr) {
- m["reverse"] = value
- }
-}
-
-// Compute the cumulative sum of the tensor `x` along `axis`.
+// Given `tensor`, this operation returns a tensor that has the same values
+// as `tensor` with shape `shape`.
//
-// By default, this op performs an inclusive cumsum, which means that the first
-// element of the input is identical to the first element of the output:
+// If one component of `shape` is the special value -1, the size of that dimension
+// is computed so that the total size remains constant. In particular, a `shape`
+// of `[-1]` flattens into 1-D. At most one component of `shape` can be -1.
//
-// ```python
-// tf.cumsum([a, b, c]) # => [a, a + b, a + b + c]
-// ```
+// If `shape` is 1-D or higher, then the operation returns a tensor with shape
+// `shape` filled with the values of `tensor`. In this case, the number of elements
+// implied by `shape` must be the same as the number of elements in `tensor`.
//
-// By setting the `exclusive` kwarg to `True`, an exclusive cumsum is
-// performed instead:
+// For example:
//
-// ```python
-// tf.cumsum([a, b, c], exclusive=True) # => [0, a, a + b]
// ```
+// # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]
+// # tensor 't' has shape [9]
+// reshape(t, [3, 3]) ==> [[1, 2, 3],
+// [4, 5, 6],
+// [7, 8, 9]]
//
-// By setting the `reverse` kwarg to `True`, the cumsum is performed in the
-// opposite direction:
+// # tensor 't' is [[[1, 1], [2, 2]],
+// # [[3, 3], [4, 4]]]
+// # tensor 't' has shape [2, 2, 2]
+// reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
+// [3, 3, 4, 4]]
//
-// ```python
-// tf.cumsum([a, b, c], reverse=True) # => [a + b + c, b + c, c]
-// ```
+// # tensor 't' is [[[1, 1, 1],
+// # [2, 2, 2]],
+// # [[3, 3, 3],
+// # [4, 4, 4]],
+// # [[5, 5, 5],
+// # [6, 6, 6]]]
+// # tensor 't' has shape [3, 2, 3]
+// # pass '[-1]' to flatten 't'
+// reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]
//
-// This is more efficient than using separate `tf.reverse` ops.
+// # -1 can also be used to infer the shape
//
-// The `reverse` and `exclusive` kwargs can also be combined:
+// # -1 is inferred to be 9:
+// reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
+// [4, 4, 4, 5, 5, 5, 6, 6, 6]]
+// # -1 is inferred to be 2:
+// reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
+// [4, 4, 4, 5, 5, 5, 6, 6, 6]]
+// # -1 is inferred to be 3:
+// reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1],
+// [2, 2, 2],
+// [3, 3, 3]],
+// [[4, 4, 4],
+// [5, 5, 5],
+// [6, 6, 6]]]
//
-// ```python
-// tf.cumsum([a, b, c], exclusive=True, reverse=True) # => [b + c, c, 0]
+// # tensor 't' is [7]
+// # shape `[]` reshapes to a scalar
+// reshape(t, []) ==> 7
// ```
//
// Arguments:
-// x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
-// `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
-// `complex128`, `qint8`, `quint8`, `qint32`, `half`.
-// axis: A `Tensor` of type `int32` (default: 0). Must be in the range
-// `[-rank(x), rank(x))`.
-func Cumsum(scope *Scope, x tf.Output, axis tf.Output, optional ...CumsumAttr) (out tf.Output) {
+//
+// tensor: The tensor to be reshaped.
+// shape: Defines the shape of the output tensor.
+func Reshape(scope *Scope, tensor tf.Output, shape tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "Cumsum",
+ Type: "Reshape",
Input: []tf.Input{
- x, axis,
+ tensor, shape,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
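+
+// A minimal Go sketch of the `[-1]` flattening case above (values are
+// arbitrary; scope and imports as in the NonMaxSuppressionV2 sketch):
+//
+// ```go
+// t := op.Const(s.SubScope("t"), [][]int32{{1, 2, 3}, {4, 5, 6}}) // shape [2, 3]
+// flat := op.Reshape(s, t, op.Const(s.SubScope("shape"), []int32{-1}))
+// // flat evaluates to [1 2 3 4 5 6] with shape [6].
+// ```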
-// AsStringAttr is an optional argument to AsString.
-type AsStringAttr func(optionalAttr)
-
-// AsStringPrecision sets the optional precision attribute to value.
-//
-// value: The post-decimal precision to use for floating point numbers.
-// Only used if precision > -1.
-// If not specified, defaults to -1
-func AsStringPrecision(value int64) AsStringAttr {
- return func(m optionalAttr) {
- m["precision"] = value
+// Creates a dataset that splits a SparseTensor into elements row-wise.
+func SparseTensorSliceDataset(scope *Scope, indices tf.Output, values tf.Output, dense_shape tf.Output) (handle tf.Output) {
+ if scope.Err() != nil {
+ return
}
-}
-
-// AsStringScientific sets the optional scientific attribute to value.
-//
-// value: Use scientific notation for floating point numbers.
-// If not specified, defaults to false
-func AsStringScientific(value bool) AsStringAttr {
- return func(m optionalAttr) {
- m["scientific"] = value
+ opspec := tf.OpSpec{
+ Type: "SparseTensorSliceDataset",
+ Input: []tf.Input{
+ indices, values, dense_shape,
+ },
}
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// AsStringShortest sets the optional shortest attribute to value.
-//
-// value: Use shortest representation (either scientific or standard) for
-// floating point numbers.
-// If not specified, defaults to false
-func AsStringShortest(value bool) AsStringAttr {
- return func(m optionalAttr) {
- m["shortest"] = value
+// Creates a dataset that concatenates `input_dataset` with `another_dataset`.
+func ConcatenateDataset(scope *Scope, input_dataset tf.Output, another_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
+ opspec := tf.OpSpec{
+ Type: "ConcatenateDataset",
+ Input: []tf.Input{
+ input_dataset, another_dataset,
+ },
+ Attrs: attrs,
}
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// AsStringWidth sets the optional width attribute to value.
-//
-// value: Pad pre-decimal numbers to this width.
-// Applies to both floating point and integer numbers.
-// Only used if width > -1.
-// If not specified, defaults to -1
-func AsStringWidth(value int64) AsStringAttr {
- return func(m optionalAttr) {
- m["width"] = value
+// Creates a dataset that contains the elements of `input_dataset` ignoring errors.
+func IgnoreErrorsDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
+ opspec := tf.OpSpec{
+ Type: "IgnoreErrorsDataset",
+ Input: []tf.Input{
+ input_dataset,
+ },
+ Attrs: attrs,
}
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// AsStringFill sets the optional fill attribute to value.
+// CropAndResizeGradImageAttr is an optional argument to CropAndResizeGradImage.
+type CropAndResizeGradImageAttr func(optionalAttr)
+
+// CropAndResizeGradImageMethod sets the optional method attribute to value.
//
-// value: The value to pad if width > -1. If empty, pads with spaces.
-// Another typical value is '0'. String cannot be longer than 1 character.
-// If not specified, defaults to ""
-func AsStringFill(value string) AsStringAttr {
+// value: A string specifying the interpolation method. Only 'bilinear' is
+// supported for now.
+// If not specified, defaults to "bilinear"
+func CropAndResizeGradImageMethod(value string) CropAndResizeGradImageAttr {
return func(m optionalAttr) {
- m["fill"] = value
+ m["method"] = value
}
}
-// Converts each entry in the given tensor to strings. Supports many numeric
+// Computes the gradient of the crop_and_resize op wrt the input image tensor.
//
-// types and boolean.
-func AsString(scope *Scope, input tf.Output, optional ...AsStringAttr) (output tf.Output) {
+// Arguments:
+// grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
+// boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
+// specifies the coordinates of a box in the `box_ind[i]` image and is specified
+// in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
+// `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the
+// `[0, 1]` interval of normalized image height is mapped to
+// `[0, image_height - 1]` in image height coordinates. We do allow y1 > y2, in
+// which case the sampled crop is an up-down flipped version of the original
+// image. The width dimension is treated similarly. Normalized coordinates
+// outside the `[0, 1]` range are allowed, in which case we use
+// `extrapolation_value` to extrapolate the input image values.
+// box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
+// The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
+// image_size: A 1-D tensor with value `[batch, image_height, image_width, depth]`
+// containing the original image size. Both `image_height` and `image_width` need
+// to be positive.
+//
+//
+// Returns A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
+func CropAndResizeGradImage(scope *Scope, grads tf.Output, boxes tf.Output, box_ind tf.Output, image_size tf.Output, T tf.DataType, optional ...CropAndResizeGradImageAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"T": T}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "AsString",
+ Type: "CropAndResizeGradImage",
Input: []tf.Input{
- input,
+ grads, boxes, box_ind, image_size,
},
Attrs: attrs,
}
@@ -19393,211 +19875,163 @@ func AsString(scope *Scope, input tf.Output, optional ...AsStringAttr) (output t
return op.Output(0)
}
-// Assigns sparse updates to the variable referenced by `resource`.
-//
-// This operation computes
-//
-// # Scalar indices
-// ref[indices, ...] = updates[...]
-//
-// # Vector indices (for each i)
-// ref[indices[i], ...] = updates[i, ...]
-//
-// # High rank indices (for each i, ..., j)
-// ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
-//
-// Arguments:
-// resource: Should be from a `Variable` node.
-// indices: A tensor of indices into the first dimension of `ref`.
-// updates: A tensor of updated values to add to `ref`.
-//
-// Returns the created operation.
-func ResourceScatterUpdate(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
+// Reads and outputs the entire contents of the input filename.
+func ReadFile(scope *Scope, filename tf.Output) (contents tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "ResourceScatterUpdate",
+ Type: "ReadFile",
Input: []tf.Input{
- resource, indices, updates,
+ filename,
},
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// GenerateVocabRemappingAttr is an optional argument to GenerateVocabRemapping.
-type GenerateVocabRemappingAttr func(optionalAttr)
-
-// GenerateVocabRemappingOldVocabSize sets the optional old_vocab_size attribute to value.
+// Concatenates tensors along one dimension.
//
-// value: Number of entries in the old vocab file to consider. If -1,
-// use the entire old vocabulary.
-// If not specified, defaults to -1
+// Arguments:
+// values: List of `N` Tensors to concatenate. Their ranks and types must match,
+// and their sizes must match in all dimensions except `concat_dim`.
+// axis: 0-D. The dimension along which to concatenate. Must be in the
+// range [-rank(values), rank(values)).
//
-// REQUIRES: value >= -1
-func GenerateVocabRemappingOldVocabSize(value int64) GenerateVocabRemappingAttr {
- return func(m optionalAttr) {
- m["old_vocab_size"] = value
+// Returns A `Tensor` with the concatenation of values stacked along the
+// `concat_dim` dimension. This tensor's shape matches that of `values` except
+// in `concat_dim` where it has the sum of the sizes.
+func ConcatV2(scope *Scope, values []tf.Output, axis tf.Output) (output tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "ConcatV2",
+ Input: []tf.Input{
+ tf.OutputList(values), axis,
+ },
}
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// Given a path to new and old vocabulary files, returns a remapping Tensor of
-//
-// length `num_new_vocab`, where `remapping[i]` contains the row number in the old
-// vocabulary that corresponds to row `i` in the new vocabulary (starting at line
-// `new_vocab_offset` and up to `num_new_vocab` entities), or `-1` if entry `i`
-// in the new vocabulary is not in the old vocabulary. The old vocabulary is
-// constrained to the first `old_vocab_size` entries if `old_vocab_size` is not the
-// default value of -1.
-//
-// `num_vocab_offset` enables
-// use in the partitioned variable case, and should generally be set through
-// examining partitioning info. The format of the files should be a text file,
-// with each line containing a single entity within the vocabulary.
-//
-// For example, with `new_vocab_file` a text file containing each of the following
-// elements on a single line: `[f0, f1, f2, f3]`, old_vocab_file = [f1, f0, f3],
-// `num_new_vocab = 3, new_vocab_offset = 1`, the returned remapping would be
-// `[0, -1, 2]`.
+// Adds a value to the current value of a variable.
//
-// The op also returns a count of how many entries in the new vocabulary
-// were present in the old vocabulary, which is used to calculate the number of
-// values to initialize in a weight matrix remapping
+// Any ReadVariableOp which depends directly or indirectly on this assign is
+// guaranteed to see the incremented value or a subsequent newer one.
//
-// This functionality can be used to remap both row vocabularies (typically,
-// features) and column vocabularies (typically, classes) from TensorFlow
-// checkpoints. Note that the partitioning logic relies on contiguous vocabularies
-// corresponding to div-partitioned variables. Moreover, the underlying remapping
-// uses an IndexTable (as opposed to an inexact CuckooTable), so client code should
-// use the corresponding index_table_from_file() as the FeatureColumn framework
-// does (as opposed to tf.feature_to_id(), which uses a CuckooTable).
+// Outputs the incremented value, which can be used to totally order the
+// increments to this variable.
//
// Arguments:
-// new_vocab_file: Path to the new vocab file.
-// old_vocab_file: Path to the old vocab file.
-// new_vocab_offset: How many entries into the new vocab file to start reading.
-// num_new_vocab: Number of entries in the new vocab file to remap.
+// resource: handle to the resource in which to store the variable.
+// value: the value by which the variable will be incremented.
//
-// Returns A Tensor of length num_new_vocab where the element at index i
-// is equal to the old ID that maps to the new ID i. This element is -1 for any
-// new ID that is not found in the old vocabulary.Number of new vocab entries found in old vocab.
-func GenerateVocabRemapping(scope *Scope, new_vocab_file tf.Output, old_vocab_file tf.Output, new_vocab_offset int64, num_new_vocab int64, optional ...GenerateVocabRemappingAttr) (remapping tf.Output, num_present tf.Output) {
+// Returns the created operation.
+func AssignAddVariableOp(scope *Scope, resource tf.Output, value tf.Output) (o *tf.Operation) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"new_vocab_offset": new_vocab_offset, "num_new_vocab": num_new_vocab}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "GenerateVocabRemapping",
+ Type: "AssignAddVariableOp",
Input: []tf.Input{
- new_vocab_file, old_vocab_file,
+ resource, value,
},
- Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1)
+ return scope.AddOperation(opspec)
}
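+
+// A minimal sketch of the ordering guarantee above, using VarHandleOp,
+// AssignVariableOp, and ReadVariableOp from this package (scope assumed; run
+// initOp and then addOp as session targets before fetching read to observe
+// the incremented value):
+//
+// ```go
+// v := op.VarHandleOp(s, tf.Float, tf.ScalarShape())
+// initOp := op.AssignVariableOp(s.SubScope("init"), v, op.Const(s.SubScope("zero"), float32(0)))
+// addOp := op.AssignAddVariableOp(s.SubScope("add"), v, op.Const(s.SubScope("one"), float32(1)))
+// read := op.ReadVariableOp(s, v, tf.Float)
+// ```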
-// Computes softsign: `features / (abs(features) + 1)`.
-func Softsign(scope *Scope, features tf.Output) (activations tf.Output) {
+// Records the latency of producing `input_dataset` elements in a StatsAggregator.
+func LatencyStatsDataset(scope *Scope, input_dataset tf.Output, tag tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
opspec := tf.OpSpec{
- Type: "Softsign",
+ Type: "LatencyStatsDataset",
Input: []tf.Input{
- features,
+ input_dataset, tag,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// ResizeBilinearAttr is an optional argument to ResizeBilinear.
-type ResizeBilinearAttr func(optionalAttr)
-
-// ResizeBilinearAlignCorners sets the optional align_corners attribute to value.
-//
-// value: If true, rescale input by (new_height - 1) / (height - 1), which
-// exactly aligns the 4 corners of images and resized images. If false, rescale
-// by new_height / height. Treat similarly the width dimension.
-// If not specified, defaults to false
-func ResizeBilinearAlignCorners(value bool) ResizeBilinearAttr {
- return func(m optionalAttr) {
- m["align_corners"] = value
- }
-}
-
-// Resize `images` to `size` using bilinear interpolation.
+// Convert JSON-encoded Example records to binary protocol buffer strings.
//
-// Input images can be of different types but output images are always float.
+// This op translates a tensor containing Example records, encoded using
+// the [standard JSON
+// mapping](https://developers.google.com/protocol-buffers/docs/proto3#json),
+// into a tensor containing the same records encoded as binary protocol
+// buffers. The resulting tensor can then be fed to any of the other
+// Example-parsing ops.
//
// Arguments:
-// images: 4-D with shape `[batch, height, width, channels]`.
-// size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The
-// new size for the images.
+// json_examples: Each string is a JSON object serialized according to the JSON
+// mapping of the Example proto.
//
-// Returns 4-D with shape
-// `[batch, new_height, new_width, channels]`.
-func ResizeBilinear(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeBilinearAttr) (resized_images tf.Output) {
+// Returns Each string is a binary Example protocol buffer corresponding
+// to the respective element of `json_examples`.
+func DecodeJSONExample(scope *Scope, json_examples tf.Output) (binary_examples tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "ResizeBilinear",
+ Type: "DecodeJSONExample",
Input: []tf.Input{
- images, size,
+ json_examples,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
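+
+// A minimal sketch of feeding DecodeJSONExample from a constant (the single
+// record here is a trivially empty Example, `{}`):
+//
+// ```go
+// jsonExamples := op.Const(s, []string{"{}"})
+// binaryExamples := op.DecodeJSONExample(s, jsonExamples)
+// ```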
-// ProdAttr is an optional argument to Prod.
-type ProdAttr func(optionalAttr)
-
-// ProdKeepDims sets the optional keep_dims attribute to value.
+// Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors.
//
-// value: If true, retain reduced dimensions with length 1.
-// If not specified, defaults to false
-func ProdKeepDims(value bool) ProdAttr {
- return func(m optionalAttr) {
- m["keep_dims"] = value
- }
-}
-
-// Computes the product of elements across dimensions of a tensor.
+// The `input` tensor has shape `[batch, in_height, in_width, depth]` and the
+// `filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each
+// input channel is processed independently of the others with its own structuring
+// function. The `output` tensor has shape
+// `[batch, out_height, out_width, depth]`. The spatial dimensions of the output
+// tensor depend on the `padding` algorithm. We currently only support the default
+// "NHWC" `data_format`.
//
-// Reduces `input` along the dimensions given in `axis`. Unless
-// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
-// `axis`. If `keep_dims` is true, the reduced dimensions are
-// retained with length 1.
+// In detail, the grayscale morphological 2-D dilation is the max-sum correlation
+// (for consistency with `conv2d`, we use unmirrored filters):
+//
+// output[b, y, x, c] =
+// max_{dy, dx} input[b,
+// strides[1] * y + rates[1] * dy,
+// strides[2] * x + rates[2] * dx,
+// c] +
+// filter[dy, dx, c]
+//
+// Max-pooling is a special case when the filter has size equal to the pooling
+// kernel size and contains all zeros.
+//
+// Note on duality: The dilation of `input` by the `filter` is equal to the
+// negation of the erosion of `-input` by the reflected `filter`.
//
// Arguments:
-// input: The tensor to reduce.
-// axis: The dimensions to reduce. Must be in the range
-// `[-rank(input), rank(input))`.
+// input: 4-D with shape `[batch, in_height, in_width, depth]`.
+// filter: 3-D with shape `[filter_height, filter_width, depth]`.
+// strides: The stride of the sliding window for each dimension of the input
+// tensor. Must be: `[1, stride_height, stride_width, 1]`.
+// rates: The input stride for atrous morphological dilation. Must be:
+// `[1, rate_height, rate_width, 1]`.
+// padding: The type of padding algorithm to use.
//
-// Returns The reduced tensor.
-func Prod(scope *Scope, input tf.Output, axis tf.Output, optional ...ProdAttr) (output tf.Output) {
+// Returns 4-D with shape `[batch, out_height, out_width, depth]`.
+func Dilation2D(scope *Scope, input tf.Output, filter tf.Output, strides []int64, rates []int64, padding string) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
+ attrs := map[string]interface{}{"strides": strides, "rates": rates, "padding": padding}
opspec := tf.OpSpec{
- Type: "Prod",
+ Type: "Dilation2D",
Input: []tf.Input{
- input, axis,
+ input, filter,
},
Attrs: attrs,
}
@@ -19605,167 +20039,159 @@ func Prod(scope *Scope, input tf.Output, axis tf.Output, optional ...ProdAttr) (
return op.Output(0)
}
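+
+// A sketch of supplying Dilation2D's required attributes (unit strides and
+// rates and "SAME" padding are illustrative choices; placeholders stand in
+// for real data):
+//
+// ```go
+// input := op.Placeholder(s.SubScope("input"), tf.Float)   // [batch, h, w, depth]
+// filter := op.Placeholder(s.SubScope("filter"), tf.Float) // [fh, fw, depth]
+// out := op.Dilation2D(s, input, filter,
+//     []int64{1, 1, 1, 1}, // strides
+//     []int64{1, 1, 1, 1}, // rates
+//     "SAME")              // padding
+// ```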
-// StringSplitAttr is an optional argument to StringSplit.
-type StringSplitAttr func(optionalAttr)
-
-// StringSplitSkipEmpty sets the optional skip_empty attribute to value.
+// Converts the given variant tensor to an iterator and stores it in the given resource.
//
-// value: A `bool`. If `True`, skip the empty strings from the result.
-// If not specified, defaults to true
-func StringSplitSkipEmpty(value bool) StringSplitAttr {
+// Arguments:
+// resource_handle: A handle to an iterator resource.
+// serialized: A variant tensor storing the state of the iterator contained in the
+// resource.
+//
+// Returns the created operation.
+func DeserializeIterator(scope *Scope, resource_handle tf.Output, serialized tf.Output) (o *tf.Operation) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "DeserializeIterator",
+ Input: []tf.Input{
+ resource_handle, serialized,
+ },
+ }
+ return scope.AddOperation(opspec)
+}
+
+// TensorArrayConcatV2Attr is an optional argument to TensorArrayConcatV2.
+type TensorArrayConcatV2Attr func(optionalAttr)
+
+// TensorArrayConcatV2ElementShapeExcept0 sets the optional element_shape_except0 attribute to value.
+// If not specified, defaults to <unknown_rank:true >
+func TensorArrayConcatV2ElementShapeExcept0(value tf.Shape) TensorArrayConcatV2Attr {
return func(m optionalAttr) {
- m["skip_empty"] = value
+ m["element_shape_except0"] = value
}
}
-// Split elements of `input` based on `delimiter` into a `SparseTensor`.
-//
-// Let N be the size of source (typically N will be the batch size). Split each
-// element of `input` based on `delimiter` and return a `SparseTensor`
-// containing the splitted tokens. Empty tokens are ignored.
-//
-// `delimiter` can be empty, or a string of split characters. If `delimiter` is an
-// empty string, each element of `input` is split into individual single-byte
-// character strings, including splitting of UTF-8 multibyte sequences. Otherwise
-// every character of `delimiter` is a potential split point.
-//
-// For example:
-// N = 2, input[0] is 'hello world' and input[1] is 'a b c', then the output
-// will be
-//
-// indices = [0, 0;
-// 0, 1;
-// 1, 0;
-// 1, 1;
-// 1, 2]
-// shape = [2, 3]
-// values = ['hello', 'world', 'a', 'b', 'c']
-//
-// Arguments:
-// input: 1-D. Strings to split.
-// delimiter: 0-D. Delimiter characters (bytes), or empty string.
-//
-// Returns A dense matrix of int64 representing the indices of the sparse tensor.A vector of strings corresponding to the splited values.a length-2 vector of int64 representing the shape of the sparse
-// tensor, where the first value is N and the second value is the maximum number
-// of tokens in a single input entry.
-func StringSplit(scope *Scope, input tf.Output, delimiter tf.Output, optional ...StringSplitAttr) (indices tf.Output, values tf.Output, shape tf.Output) {
+// Deprecated. Use TensorArrayConcatV3 instead.
+func TensorArrayConcatV2(scope *Scope, handle tf.Output, flow_in tf.Output, dtype tf.DataType, optional ...TensorArrayConcatV2Attr) (value tf.Output, lengths tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"dtype": dtype}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "StringSplit",
+ Type: "TensorArrayConcatV2",
Input: []tf.Input{
- input, delimiter,
+ handle, flow_in,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
+ return op.Output(0), op.Output(1)
}
-// Inverse 3D real-valued fast Fourier transform.
+// Creates a dataset that batches and pads `batch_size` elements from the input.
//
-// Computes the inverse 3-dimensional discrete Fourier transform of a real-valued
-// signal over the inner-most 3 dimensions of `input`.
+// Arguments:
//
-// The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`:
-// The inner-most dimension contains the `fft_length / 2 + 1` unique components of
-// the DFT of a real-valued signal. If `fft_length` is not provided, it is computed
-// from the size of the inner-most 3 dimensions of `input`. If the FFT length used
-// to compute `input` is odd, it should be provided since it cannot be inferred
-// properly.
+// batch_size: A scalar representing the number of elements to accumulate in a
+// batch.
+// padded_shapes: A list of int64 tensors representing the desired padded shapes
+// of the corresponding output components. These shapes may be partially
+// specified, using `-1` to indicate that a particular dimension should be
+// padded to the maximum size of all batch elements.
+// padding_values: A list of scalars containing the padding value to use for
+// each of the outputs.
//
-// Along each axis `IRFFT3D` is computed on, if `fft_length` (or
-// `fft_length / 2 + 1` for the inner-most dimension) is smaller than the
-// corresponding dimension of `input`, the dimension is cropped. If it is larger,
-// the dimension is padded with zeros.
+func PaddedBatchDataset(scope *Scope, input_dataset tf.Output, batch_size tf.Output, padded_shapes []tf.Output, padding_values []tf.Output, output_shapes []tf.Shape) (handle tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{"output_shapes": output_shapes}
+ opspec := tf.OpSpec{
+ Type: "PaddedBatchDataset",
+ Input: []tf.Input{
+ input_dataset, batch_size, tf.OutputList(padded_shapes), tf.OutputList(padding_values),
+ },
+ Attrs: attrs,
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
+// Creates a dataset that batches input elements into a SparseTensor.
//
// Arguments:
-// input: A complex64 tensor.
-// fft_length: An int32 tensor of shape [3]. The FFT length for each dimension.
+// input_dataset: A handle to an input dataset. Must have a single component.
+// batch_size: A scalar representing the number of elements to accumulate in a
+// batch.
+// row_shape: A vector representing the dense shape of each row in the produced
+// SparseTensor. The shape may be partially specified, using `-1` to indicate
+// that a particular dimension should use the maximum size of all batch elements.
//
-// Returns A float32 tensor of the same rank as `input`. The inner-most 3
-// dimensions of `input` are replaced with the `fft_length` samples of their
-// inverse 3D real Fourier transform.
//
-// @compatibility(numpy)
-// Equivalent to np.irfftn with 3 dimensions.
-// @end_compatibility
-func IRFFT3D(scope *Scope, input tf.Output, fft_length tf.Output) (output tf.Output) {
+func DenseToSparseBatchDataset(scope *Scope, input_dataset tf.Output, batch_size tf.Output, row_shape tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
opspec := tf.OpSpec{
- Type: "IRFFT3D",
+ Type: "DenseToSparseBatchDataset",
Input: []tf.Input{
- input, fft_length,
+ input_dataset, batch_size, row_shape,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Returns the truth value of (x != y) element-wise.
-//
-// *NOTE*: `NotEqual` supports broadcasting. More about broadcasting
-// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-func NotEqual(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
+// Deprecated. Use TensorArrayGradV3 instead.
+func TensorArrayGradV2(scope *Scope, handle tf.Output, flow_in tf.Output, source string) (grad_handle tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"source": source}
opspec := tf.OpSpec{
- Type: "NotEqual",
+ Type: "TensorArrayGradV2",
Input: []tf.Input{
- x, y,
+ handle, flow_in,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// GatherAttr is an optional argument to Gather.
-type GatherAttr func(optionalAttr)
+// ResourceSparseApplyAdadeltaAttr is an optional argument to ResourceSparseApplyAdadelta.
+type ResourceSparseApplyAdadeltaAttr func(optionalAttr)
-// GatherValidateIndices sets the optional validate_indices attribute to value.
-// If not specified, defaults to true
-func GatherValidateIndices(value bool) GatherAttr {
+// ResourceSparseApplyAdadeltaUseLocking sets the optional use_locking attribute to value.
+//
+// value: If True, updating of the var and accum tensors will be protected by
+// a lock; otherwise the behavior is undefined, but may exhibit less contention.
+// If not specified, defaults to false
+func ResourceSparseApplyAdadeltaUseLocking(value bool) ResourceSparseApplyAdadeltaAttr {
return func(m optionalAttr) {
- m["validate_indices"] = value
+ m["use_locking"] = value
}
}
-// Gather slices from `params` according to `indices`.
-//
-// `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
-// Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
-//
-// ```python
-// # Scalar indices
-// output[:, ..., :] = params[indices, :, ... :]
-//
-// # Vector indices
-// output[i, :, ..., :] = params[indices[i], :, ... :]
-//
-// # Higher rank indices
-// output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
-// ```
+// Updates relevant entries in '*var' and '*accum' according to the adadelta scheme.
//
-// If `indices` is a permutation and `len(indices) == params.shape[0]` then
-// this operation will permute `params` accordingly.
+// Arguments:
//
-// `validate_indices`: DEPRECATED. If this operation is assigned to CPU, values in
-// `indices` are always validated to be within range. If assigned to GPU,
-// out-of-bound indices result in safe but unspecified behavior, which may include
-// raising an error.
+// accum: Should be from a Variable().
+// accum_update: : Should be from a Variable().
+// lr: Learning rate. Must be a scalar.
+// rho: Decay factor. Must be a scalar.
+// epsilon: Constant factor. Must be a scalar.
+// grad: The gradient.
+// indices: A vector of indices into the first dimension of var and accum.
//
-// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
-// <img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt>
-// </div>
-func Gather(scope *Scope, params tf.Output, indices tf.Output, optional ...GatherAttr) (output tf.Output) {
+// Returns the created operation.
+func ResourceSparseApplyAdadelta(scope *Scope, var_ tf.Output, accum tf.Output, accum_update tf.Output, lr tf.Output, rho tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyAdadeltaAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
@@ -19774,31 +20200,28 @@ func Gather(scope *Scope, params tf.Output, indices tf.Output, optional ...Gathe
a(attrs)
}
opspec := tf.OpSpec{
- Type: "Gather",
+ Type: "ResourceSparseApplyAdadelta",
Input: []tf.Input{
- params, indices,
+ var_, accum, accum_update, lr, rho, epsilon, grad, indices,
},
Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
-// Produce a string tensor that encodes the state of a Reader.
-//
-// Not all Readers support being serialized, so this can produce an
-// Unimplemented error.
+// Identity op for gradient debugging.
//
-// Arguments:
-// reader_handle: Handle to a Reader.
-func ReaderSerializeStateV2(scope *Scope, reader_handle tf.Output) (state tf.Output) {
+// This op is hidden from the public Python API. It is used by TensorFlow Debugger to
+// register gradient tensors for gradient debugging.
+// This op operates on non-reference-type tensors.
+func DebugGradientIdentity(scope *Scope, input tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "ReaderSerializeStateV2",
+ Type: "DebugGradientIdentity",
Input: []tf.Input{
- reader_handle,
+ input,
},
}
op := scope.AddOperation(opspec)
@@ -19901,42 +20324,24 @@ func Substr(scope *Scope, input tf.Output, pos tf.Output, len tf.Output) (output
return op.Output(0)
}
-// StatelessRandomNormalAttr is an optional argument to StatelessRandomNormal.
-type StatelessRandomNormalAttr func(optionalAttr)
-
-// StatelessRandomNormalDtype sets the optional dtype attribute to value.
-//
-// value: The type of the output.
-// If not specified, defaults to DT_FLOAT
-func StatelessRandomNormalDtype(value tf.DataType) StatelessRandomNormalAttr {
- return func(m optionalAttr) {
- m["dtype"] = value
- }
-}
-
-// Outputs deterministic pseudorandom values from a normal distribution.
-//
-// The generated values will have mean 0 and standard deviation 1.
-//
-// The outputs are a deterministic function of `shape` and `seed`.
+// Creates a Dataset that returns pseudorandom numbers.
//
// Arguments:
-// shape: The shape of the output tensor.
-// seed: 2 seeds (shape [2]).
+// seed: A scalar seed for the random number generator. If either seed or
+// seed2 is set to be non-zero, the random number generator is seeded
+// by the given seed. Otherwise, a random seed is used.
+// seed2: A second scalar seed to avoid seed collision.
//
-// Returns Random values with specified shape.
-func StatelessRandomNormal(scope *Scope, shape tf.Output, seed tf.Output, optional ...StatelessRandomNormalAttr) (output tf.Output) {
+//
+func RandomDataset(scope *Scope, seed tf.Output, seed2 tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
+ attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
opspec := tf.OpSpec{
- Type: "StatelessRandomNormal",
+ Type: "RandomDataset",
Input: []tf.Input{
- shape, seed,
+ seed, seed2,
},
Attrs: attrs,
}
@@ -19944,105 +20349,61 @@ func StatelessRandomNormal(scope *Scope, shape tf.Output, seed tf.Output, option
return op.Output(0)
}
-// UniqueWithCountsAttr is an optional argument to UniqueWithCounts.
-type UniqueWithCountsAttr func(optionalAttr)
-
-// UniqueWithCountsOutIdx sets the optional out_idx attribute to value.
-// If not specified, defaults to DT_INT32
-func UniqueWithCountsOutIdx(value tf.DataType) UniqueWithCountsAttr {
- return func(m optionalAttr) {
- m["out_idx"] = value
- }
-}
-
-// Finds unique elements in a 1-D tensor.
-//
-// This operation returns a tensor `y` containing all of the unique elements of `x`
-// sorted in the same order that they occur in `x`. This operation also returns a
-// tensor `idx` the same size as `x` that contains the index of each value of `x`
-// in the unique output `y`. Finally, it returns a third tensor `count` that
-// contains the count of each element of `y` in `x`. In other words:
+// Creates a dataset that shuffles and repeats elements from `input_dataset` pseudorandomly.
//
-// `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
//
-// For example:
+// Arguments:
//
-// ```
-// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
-// y, idx, count = unique_with_counts(x)
-// y ==> [1, 2, 4, 7, 8]
-// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
-// count ==> [2, 1, 3, 1, 2]
-// ```
+// buffer_size: The number of output elements to buffer in an iterator over
+// this dataset. Compare with the `min_after_dequeue` attr when creating a
+// `RandomShuffleQueue`.
+// seed: A scalar seed for the random number generator. If either `seed` or
+// `seed2` is set to be non-zero, the random number generator is seeded
+// by the given seed. Otherwise, a random seed is used.
+// seed2: A second scalar seed to avoid seed collision.
+// count: A scalar representing the number of times the underlying dataset
+// should be repeated. The default is `-1`, which results in infinite repetition.
//
-// Arguments:
-// x: 1-D.
//
-// Returns 1-D.1-D.1-D.
-func UniqueWithCounts(scope *Scope, x tf.Output, optional ...UniqueWithCountsAttr) (y tf.Output, idx tf.Output, count tf.Output) {
+func ShuffleAndRepeatDataset(scope *Scope, input_dataset tf.Output, buffer_size tf.Output, seed tf.Output, seed2 tf.Output, count tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
+ attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
opspec := tf.OpSpec{
- Type: "UniqueWithCounts",
+ Type: "ShuffleAndRepeatDataset",
Input: []tf.Input{
- x,
+ input_dataset, buffer_size, seed, seed2, count,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
+ return op.Output(0)
}
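+
+// A sketch of the scalar inputs this op expects (values are illustrative; a
+// count of -1 repeats indefinitely, and ds, types, and shapes are assumed to
+// come from an upstream dataset definition):
+//
+// ```go
+// buf := op.Const(s.SubScope("buf"), int64(1024))
+// seed := op.Const(s.SubScope("seed"), int64(42))
+// seed2 := op.Const(s.SubScope("seed2"), int64(7))
+// count := op.Const(s.SubScope("count"), int64(-1))
+// shuffled := op.ShuffleAndRepeatDataset(s, ds, buf, seed, seed2, count, types, shapes)
+// ```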
-// RestoreSliceAttr is an optional argument to RestoreSlice.
-type RestoreSliceAttr func(optionalAttr)
-
-// RestoreSlicePreferredShard sets the optional preferred_shard attribute to value.
+// Creates a dataset that caches elements from `input_dataset`.
//
-// value: Index of file to open first if multiple files match
-// `file_pattern`. See the documentation for `Restore`.
-// If not specified, defaults to -1
-func RestoreSlicePreferredShard(value int64) RestoreSliceAttr {
- return func(m optionalAttr) {
- m["preferred_shard"] = value
- }
-}
-
-// Restores a tensor from checkpoint files.
+// A CacheDataset will iterate over the input_dataset and store tensors. If the
+// cache already exists, the cache will be used. If the cache is inappropriate
+// (e.g. cannot be opened, contains tensors of the wrong shape / size), an error
+// will be returned when used.
//
-// This is like `Restore` except that restored tensor can be listed as filling
-// only a slice of a larger tensor. `shape_and_slice` specifies the shape of the
-// larger tensor and the slice that the restored tensor covers.
+// Arguments:
//
-// The `shape_and_slice` input has the same format as the
-// elements of the `shapes_and_slices` input of the `SaveSlices` op.
+// filename: A path on the filesystem where we should cache the dataset. Note: this
+// will be a directory.
//
-// Arguments:
-// file_pattern: Must have a single element. The pattern of the files from
-// which we read the tensor.
-// tensor_name: Must have a single element. The name of the tensor to be
-// restored.
-// shape_and_slice: Scalar. The shapes and slice specifications to use when
-// restoring a tensors.
-// dt: The type of the tensor to be restored.
//
-// Returns The restored tensor.
-func RestoreSlice(scope *Scope, file_pattern tf.Output, tensor_name tf.Output, shape_and_slice tf.Output, dt tf.DataType, optional ...RestoreSliceAttr) (tensor tf.Output) {
+func CacheDataset(scope *Scope, input_dataset tf.Output, filename tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dt": dt}
- for _, a := range optional {
- a(attrs)
- }
+ attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
opspec := tf.OpSpec{
- Type: "RestoreSlice",
+ Type: "CacheDataset",
Input: []tf.Input{
- file_pattern, tensor_name, shape_and_slice,
+ input_dataset, filename,
},
Attrs: attrs,
}
@@ -20050,337 +20411,273 @@ func RestoreSlice(scope *Scope, file_pattern tf.Output, tensor_name tf.Output, s
return op.Output(0)
}
-// StatelessTruncatedNormalAttr is an optional argument to StatelessTruncatedNormal.
-type StatelessTruncatedNormalAttr func(optionalAttr)
+// PlaceholderAttr is an optional argument to Placeholder.
+type PlaceholderAttr func(optionalAttr)
-// StatelessTruncatedNormalDtype sets the optional dtype attribute to value.
+// PlaceholderShape sets the optional shape attribute to value.
//
-// value: The type of the output.
-// If not specified, defaults to DT_FLOAT
-func StatelessTruncatedNormalDtype(value tf.DataType) StatelessTruncatedNormalAttr {
+// value: (Optional) The shape of the tensor. If the shape has 0 dimensions, the
+// shape is unconstrained.
+// If not specified, defaults to <unknown_rank:true >
+func PlaceholderShape(value tf.Shape) PlaceholderAttr {
return func(m optionalAttr) {
- m["dtype"] = value
+ m["shape"] = value
}
}
-// Outputs deterministic pseudorandom values from a truncated normal distribution.
-//
-// The generated values follow a normal distribution with mean 0 and standard
-// deviation 1, except that values whose magnitude is more than 2 standard
-// deviations from the mean are dropped and re-picked.
+// A placeholder op for a value that will be fed into the computation.
//
-// The outputs are a deterministic function of `shape` and `seed`.
+// N.B. This operation will fail with an error if it is executed. It is
+// intended as a way to represent a value that will always be fed, and to
+// provide attrs that enable the fed value to be checked at runtime.
//
// Arguments:
-// shape: The shape of the output tensor.
-// seed: 2 seeds (shape [2]).
+// dtype: The type of elements in the tensor.
//
-// Returns Random values with specified shape.
-func StatelessTruncatedNormal(scope *Scope, shape tf.Output, seed tf.Output, optional ...StatelessTruncatedNormalAttr) (output tf.Output) {
+// Returns A placeholder tensor that must be replaced using the feed mechanism.
+func Placeholder(scope *Scope, dtype tf.DataType, optional ...PlaceholderAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"dtype": dtype}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "StatelessTruncatedNormal",
- Input: []tf.Input{
- shape, seed,
- },
+ Type: "Placeholder",
+
Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
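+
+// The optional arguments used throughout this file are functional options. A
+// sketch of constraining a placeholder's shape with one (the 28x28x1 shape is
+// arbitrary; -1 marks an unknown batch dimension):
+//
+// ```go
+// x := op.Placeholder(s, tf.Float,
+//     op.PlaceholderShape(tf.MakeShape(-1, 28, 28, 1)))
+// ```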
-// Computes the sum along sparse segments of a tensor divided by the sqrt of N.
-//
-// N is the size of the segment being reduced.
-//
-// Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
-// segments.
+// Creates a dataset that executes a SQL query and emits rows of the result set.
//
// Arguments:
+// driver_name: The database type. Currently, the only supported type is 'sqlite'.
+// data_source_name: A connection string to connect to the database.
+// query: A SQL query to execute.
//
-// indices: A 1-D tensor. Has same rank as `segment_ids`.
-// segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
//
-// Returns Has same shape as data, except for dimension 0 which
-// has size `k`, the number of segments.
-func SparseSegmentSqrtN(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output) (output tf.Output) {
+func SqlDataset(scope *Scope, driver_name tf.Output, data_source_name tf.Output, query tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
opspec := tf.OpSpec{
- Type: "SparseSegmentSqrtN",
+ Type: "SqlDataset",
Input: []tf.Input{
- data, indices, segment_ids,
+ driver_name, data_source_name, query,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// ResizeBilinearGradAttr is an optional argument to ResizeBilinearGrad.
-type ResizeBilinearGradAttr func(optionalAttr)
-
-// ResizeBilinearGradAlignCorners sets the optional align_corners attribute to value.
-//
-// value: If true, rescale grads by (orig_height - 1) / (height - 1), which
-// exactly aligns the 4 corners of grads and original_image. If false, rescale by
-// orig_height / height. Treat similarly the width dimension.
-// If not specified, defaults to false
-func ResizeBilinearGradAlignCorners(value bool) ResizeBilinearGradAttr {
- return func(m optionalAttr) {
- m["align_corners"] = value
- }
-}
-
-// Computes the gradient of bilinear interpolation.
+// Creates a dataset that emits the records from one or more binary files.
//
// Arguments:
-// grads: 4-D with shape `[batch, height, width, channels]`.
-// original_image: 4-D with shape `[batch, orig_height, orig_width, channels]`,
-// The image tensor that was resized.
-//
-// Returns 4-D with shape `[batch, orig_height, orig_width, channels]`.
-// Gradients with respect to the input image. Input image must have been
-// float or double.
-func ResizeBilinearGrad(scope *Scope, grads tf.Output, original_image tf.Output, optional ...ResizeBilinearGradAttr) (output tf.Output) {
+// filenames: A scalar or a vector containing the name(s) of the file(s) to be
+// read.
+// header_bytes: A scalar representing the number of bytes to skip at the
+// beginning of a file.
+// record_bytes: A scalar representing the number of bytes in each record.
+// footer_bytes: A scalar representing the number of bytes to skip at the end
+// of a file.
+// buffer_size: A scalar representing the number of bytes to buffer. Must be > 0.
+func FixedLengthRecordDataset(scope *Scope, filenames tf.Output, header_bytes tf.Output, record_bytes tf.Output, footer_bytes tf.Output, buffer_size tf.Output) (handle tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "ResizeBilinearGrad",
+ Type: "FixedLengthRecordDataset",
Input: []tf.Input{
- grads, original_image,
+ filenames, header_bytes, record_bytes, footer_bytes, buffer_size,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Computes the number of elements in the given table.
+// Slice a `SparseTensor` based on the `start` and `size`.
+//
+// For example, if the input is
+//
+// input_tensor = shape = [2, 7]
+// [ a d e ]
+// [b c ]
+//
+// Graphically the output tensors are:
+//
+// sparse_slice([0, 0], [2, 4]) = shape = [2, 4]
+// [ a ]
+// [b c ]
+//
+// sparse_slice([0, 4], [2, 3]) = shape = [2, 3]
+// [ d e ]
+// [ ]
//
// Arguments:
-// table_handle: Handle to the table.
+// indices: 2-D tensor representing the indices of the sparse tensor.
+// values: 1-D tensor representing the values of the sparse tensor.
+// shape: 1-D tensor representing the shape of the sparse tensor.
+// start: 1-D tensor representing the start of the slice.
+// size: 1-D tensor representing the size of the slice.
//
-// Returns Scalar that contains number of elements in the table.
-func LookupTableSizeV2(scope *Scope, table_handle tf.Output) (size tf.Output) {
+// Returns A list of 1-D tensors representing the indices of the output sparse
+// tensors, a list of 1-D tensors representing the values of the output sparse
+// tensors, and a list of 1-D tensors representing the shape of the output
+// sparse tensors.
+func SparseSlice(scope *Scope, indices tf.Output, values tf.Output, shape tf.Output, start tf.Output, size tf.Output) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "LookupTableSizeV2",
+ Type: "SparseSlice",
Input: []tf.Input{
- table_handle,
+ indices, values, shape, start, size,
},
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1), op.Output(2)
}
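+
+// The `[2, 7]` example above, expressed with constants (the column positions
+// of a..e are illustrative):
+//
+// ```go
+// indices := op.Const(s.SubScope("indices"), [][]int64{{0, 1}, {0, 4}, {0, 5}, {1, 0}, {1, 1}})
+// values := op.Const(s.SubScope("values"), []string{"a", "d", "e", "b", "c"})
+// shape := op.Const(s.SubScope("shape"), []int64{2, 7})
+// start := op.Const(s.SubScope("start"), []int64{0, 0})
+// size := op.Const(s.SubScope("size"), []int64{2, 4})
+// outIndices, outValues, outShape := op.SparseSlice(s, indices, values, shape, start, size)
+// ```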
-// Component-wise divides a SparseTensor by a dense Tensor.
-//
-// *Limitation*: this Op only broadcasts the dense side to the sparse side, but not
-// the other direction.
+// Concatenates quantized tensors along one dimension.
//
// Arguments:
-// sp_indices: 2-D. `N x R` matrix with the indices of non-empty values in a
-// SparseTensor, possibly not in canonical ordering.
-// sp_values: 1-D. `N` non-empty values corresponding to `sp_indices`.
-// sp_shape: 1-D. Shape of the input SparseTensor.
-// dense: `R`-D. The dense Tensor operand.
+// concat_dim: 0-D. The dimension along which to concatenate. Must be in the
+// range [0, rank(values)).
+// values: The `N` Tensors to concatenate. Their ranks and types must match,
+// and their sizes must match in all dimensions except `concat_dim`.
+// input_mins: The minimum scalar values for each of the input tensors.
+// input_maxes: The maximum scalar values for each of the input tensors.
//
-// Returns 1-D. The `N` values that are operated on.
-func SparseDenseCwiseDiv(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output, dense tf.Output) (output tf.Output) {
+// Returns A `Tensor` with the concatenation of values stacked along the
+// `concat_dim` dimension. This tensor's shape matches that of `values` except
+// in `concat_dim` where it has the sum of the sizes, the float value that the
+// minimum quantized output value represents, and the float value that the
+// maximum quantized output value represents.
+func QuantizedConcat(scope *Scope, concat_dim tf.Output, values []tf.Output, input_mins []tf.Output, input_maxes []tf.Output) (output tf.Output, output_min tf.Output, output_max tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "SparseDenseCwiseDiv",
+ Type: "QuantizedConcat",
Input: []tf.Input{
- sp_indices, sp_values, sp_shape, dense,
+ concat_dim, tf.OutputList(values), tf.OutputList(input_mins), tf.OutputList(input_maxes),
},
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1), op.Output(2)
}
-// Reads the value of a variable.
+// Gradients for batch normalization.
//
-// The tensor returned by this operation is immutable.
+// DEPRECATED at GraphDef version 9: Use tf.nn.batch_normalization()
//
-// The value returned by this operation is guaranteed to be influenced by all the
-// writes on which this operation depends directly or indirectly, and to not be
-// influenced by any of the writes which depend directly or indirectly on this
-// operation.
+// This op is deprecated. See `tf.nn.batch_normalization`.
//
// Arguments:
-// resource: handle to the resource in which to store the variable.
-// dtype: the dtype of the value.
-func ReadVariableOp(scope *Scope, resource tf.Output, dtype tf.DataType) (value tf.Output) {
+// t: A 4D input Tensor.
+// m: A 1D mean Tensor with size matching the last dimension of t.
+// This is the first output from tf.nn.moments,
+// or a saved moving average thereof.
+// v: A 1D variance Tensor with size matching the last dimension of t.
+// This is the second output from tf.nn.moments,
+// or a saved moving average thereof.
+// gamma: A 1D gamma Tensor with size matching the last dimension of t.
+// If "scale_after_normalization" is true, this Tensor will be multiplied
+// with the normalized Tensor.
+// backprop: 4D backprop Tensor.
+// variance_epsilon: A small float number to avoid dividing by 0.
+// scale_after_normalization: A bool indicating whether the resulted tensor
+// needs to be multiplied with gamma.
+//
+// Returns the 4D backprop tensor for input, the 1D backprop tensor for mean,
+// the 1D backprop tensor for variance, the 1D backprop tensor for beta, and
+// the 1D backprop tensor for gamma.
+func BatchNormWithGlobalNormalizationGrad(scope *Scope, t tf.Output, m tf.Output, v tf.Output, gamma tf.Output, backprop tf.Output, variance_epsilon float32, scale_after_normalization bool) (dx tf.Output, dm tf.Output, dv tf.Output, db tf.Output, dg tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dtype": dtype}
+ attrs := map[string]interface{}{"variance_epsilon": variance_epsilon, "scale_after_normalization": scale_after_normalization}
opspec := tf.OpSpec{
- Type: "ReadVariableOp",
+ Type: "BatchNormWithGlobalNormalizationGrad",
Input: []tf.Input{
- resource,
+ t, m, v, gamma, backprop,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
}
-// Associates the given iterator with the given statistics aggregator.
+// Creates a dataset that emits the records from one or more TFRecord files.
//
-// Returns the created operation.
-func IteratorSetStatsAggregator(scope *Scope, iterator_handle tf.Output, stats_aggregator_handle tf.Output) (o *tf.Operation) {
+// Arguments:
+// filenames: A scalar or vector containing the name(s) of the file(s) to be
+// read.
+// compression_type: A scalar containing either (i) the empty string (no
+// compression), (ii) "ZLIB", or (iii) "GZIP".
+// buffer_size: A scalar representing the number of bytes to buffer. A value of
+// 0 means no buffering will be performed.
+func TFRecordDataset(scope *Scope, filenames tf.Output, compression_type tf.Output, buffer_size tf.Output) (handle tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "IteratorSetStatsAggregator",
+ Type: "TFRecordDataset",
Input: []tf.Input{
- iterator_handle, stats_aggregator_handle,
+ filenames, compression_type, buffer_size,
},
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
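+
+// A sketch of constructing this op's inputs (the filename is hypothetical; an
+// empty compression string and a zero buffer size mean no compression and no
+// buffering):
+//
+// ```go
+// filenames := op.Const(s.SubScope("files"), []string{"/tmp/data.tfrecord"})
+// compression := op.Const(s.SubScope("compression"), "")
+// bufferSize := op.Const(s.SubScope("buffer"), int64(0))
+// ds := op.TFRecordDataset(s, filenames, compression, bufferSize)
+// ```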
-// ResourceSparseApplyFtrlV2Attr is an optional argument to ResourceSparseApplyFtrlV2.
-type ResourceSparseApplyFtrlV2Attr func(optionalAttr)
+// FakeQuantWithMinMaxArgsGradientAttr is an optional argument to FakeQuantWithMinMaxArgsGradient.
+type FakeQuantWithMinMaxArgsGradientAttr func(optionalAttr)
-// ResourceSparseApplyFtrlV2UseLocking sets the optional use_locking attribute to value.
-//
-// value: If `True`, updating of the var and accum tensors will be protected
-// by a lock; otherwise the behavior is undefined, but may exhibit less
-// contention.
-// If not specified, defaults to false
-func ResourceSparseApplyFtrlV2UseLocking(value bool) ResourceSparseApplyFtrlV2Attr {
+// FakeQuantWithMinMaxArgsGradientMin sets the optional min attribute to value.
+// If not specified, defaults to -6
+func FakeQuantWithMinMaxArgsGradientMin(value float32) FakeQuantWithMinMaxArgsGradientAttr {
return func(m optionalAttr) {
- m["use_locking"] = value
- }
-}
-
-// Update relevant entries in '*var' according to the Ftrl-proximal scheme.
-//
-// That is for rows we have grad for, we update var, accum and linear as follows:
-// grad_with_shrinkage = grad + 2 * l2_shrinkage * var
-// accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
-// linear += grad_with_shrinkage +
-// (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
-// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
-// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
-// accum = accum_new
-//
-// Arguments:
-// var_: Should be from a Variable().
-// accum: Should be from a Variable().
-// linear: Should be from a Variable().
-// grad: The gradient.
-// indices: A vector of indices into the first dimension of var and accum.
-// lr: Scaling factor. Must be a scalar.
-// l1: L1 regularization. Must be a scalar.
-// l2: L2 shrinkage regulariation. Must be a scalar.
-//
-// lr_power: Scaling factor. Must be a scalar.
-//
-// Returns the created operation.
-func ResourceSparseApplyFtrlV2(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, indices tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, l2_shrinkage tf.Output, lr_power tf.Output, optional ...ResourceSparseApplyFtrlV2Attr) (o *tf.Operation) {
- if scope.Err() != nil {
- return
- }
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
- opspec := tf.OpSpec{
- Type: "ResourceSparseApplyFtrlV2",
- Input: []tf.Input{
- var_, accum, linear, grad, indices, lr, l1, l2, l2_shrinkage, lr_power,
- },
- Attrs: attrs,
- }
- return scope.AddOperation(opspec)
-}
-
-// Restore a reader to a previously saved state.
-//
-// Not all Readers support being restored, so this can produce an
-// Unimplemented error.
-//
-// Arguments:
-// reader_handle: Handle to a Reader.
-// state: Result of a ReaderSerializeState of a Reader with type
-// matching reader_handle.
-//
-// Returns the created operation.
-func ReaderRestoreStateV2(scope *Scope, reader_handle tf.Output, state tf.Output) (o *tf.Operation) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "ReaderRestoreStateV2",
- Input: []tf.Input{
- reader_handle, state,
- },
+ m["min"] = value
}
- return scope.AddOperation(opspec)
}
-// Computes the absolute value of a tensor.
-//
-// Given a tensor `x`, this operation returns a tensor containing the absolute
-// value of each element in `x`. For example, if x is an input element and y is
-// an output element, this operation computes \\(y = |x|\\).
-func Abs(scope *Scope, x tf.Output) (y tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "Abs",
- Input: []tf.Input{
- x,
- },
+// FakeQuantWithMinMaxArgsGradientMax sets the optional max attribute to value.
+// If not specified, defaults to 6
+func FakeQuantWithMinMaxArgsGradientMax(value float32) FakeQuantWithMinMaxArgsGradientAttr {
+ return func(m optionalAttr) {
+ m["max"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// RandomPoissonAttr is an optional argument to RandomPoisson.
-type RandomPoissonAttr func(optionalAttr)
-
-// RandomPoissonSeed sets the optional seed attribute to value.
-// If not specified, defaults to 0
-func RandomPoissonSeed(value int64) RandomPoissonAttr {
+// FakeQuantWithMinMaxArgsGradientNumBits sets the optional num_bits attribute to value.
+// If not specified, defaults to 8
+func FakeQuantWithMinMaxArgsGradientNumBits(value int64) FakeQuantWithMinMaxArgsGradientAttr {
return func(m optionalAttr) {
- m["seed"] = value
+ m["num_bits"] = value
}
}
-// RandomPoissonSeed2 sets the optional seed2 attribute to value.
-// If not specified, defaults to 0
-func RandomPoissonSeed2(value int64) RandomPoissonAttr {
+// FakeQuantWithMinMaxArgsGradientNarrowRange sets the optional narrow_range attribute to value.
+// If not specified, defaults to false
+func FakeQuantWithMinMaxArgsGradientNarrowRange(value bool) FakeQuantWithMinMaxArgsGradientAttr {
return func(m optionalAttr) {
- m["seed2"] = value
+ m["narrow_range"] = value
}
}
-// Use RandomPoissonV2 instead.
+// Compute gradients for a FakeQuantWithMinMaxArgs operation.
//
-// DEPRECATED at GraphDef version 25: Replaced by RandomPoissonV2
-func RandomPoisson(scope *Scope, shape tf.Output, rate tf.Output, optional ...RandomPoissonAttr) (output tf.Output) {
+// Arguments:
+// gradients: Backpropagated gradients above the FakeQuantWithMinMaxArgs operation.
+// inputs: Values passed as inputs to the FakeQuantWithMinMaxArgs operation.
+//
+// Returns Backpropagated gradients below the FakeQuantWithMinMaxArgs operation:
+// `gradients * (inputs >= min && inputs <= max)`.
+func FakeQuantWithMinMaxArgsGradient(scope *Scope, gradients tf.Output, inputs tf.Output, optional ...FakeQuantWithMinMaxArgsGradientAttr) (backprops tf.Output) {
if scope.Err() != nil {
return
}
@@ -20389,9 +20686,9 @@ func RandomPoisson(scope *Scope, shape tf.Output, rate tf.Output, optional ...Ra
a(attrs)
}
opspec := tf.OpSpec{
- Type: "RandomPoisson",
+ Type: "FakeQuantWithMinMaxArgsGradient",
Input: []tf.Input{
- shape, rate,
+ gradients, inputs,
},
Attrs: attrs,
}
@@ -20399,273 +20696,171 @@ func RandomPoisson(scope *Scope, shape tf.Output, rate tf.Output, optional ...Ra
return op.Output(0)
}
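For orientation, a minimal sketch of driving this wrapper through the `op` package follows. The package name, helper name, and the example bounds are illustrative only; the import paths are the standard ones these wrappers are generated against:

```
package example

import (
	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

// fakeQuantGradSketch builds the gradient op with non-default quantization
// bounds; optional attributes are passed as trailing functional options.
func fakeQuantGradSketch() (tf.Output, error) {
	s := op.NewScope()
	grads := op.Const(s, []float32{1, 1, 1})   // upstream gradients
	inputs := op.Const(s, []float32{-8, 0, 8}) // original op inputs
	backprops := op.FakeQuantWithMinMaxArgsGradient(s, grads, inputs,
		op.FakeQuantWithMinMaxArgsGradientMin(-4),
		op.FakeQuantWithMinMaxArgsGradientMax(4))
	return backprops, s.Err()
}
```

Per the formula in the comment, gradients pass through only where `inputs` lies inside `[min, max]`, so with these bounds only the middle element's gradient survives.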
-// Applies softmax to a batched N-D `SparseTensor`.
+// BatchToSpace for 4-D tensors of type T.
//
-// The inputs represent an N-D SparseTensor with logical shape `[..., B, C]`
-// (where `N >= 2`), and with indices sorted in the canonical lexicographic order.
+// This is a legacy version of the more general BatchToSpaceND.
//
-// This op is equivalent to applying the normal `tf.nn.softmax()` to each innermost
-// logical submatrix with shape `[B, C]`, but with the catch that *the implicitly
-// zero elements do not participate*. Specifically, the algorithm is equivalent
-// to the following:
+// Rearranges (permutes) data from batch into blocks of spatial data, followed by
+// cropping. This is the reverse transformation of SpaceToBatch. More specifically,
+// this op outputs a copy of the input tensor where values from the `batch`
+// dimension are moved in spatial blocks to the `height` and `width` dimensions,
+// followed by cropping along the `height` and `width` dimensions.
//
-// (1) Applies `tf.nn.softmax()` to a densified view of each innermost submatrix
-// with shape `[B, C]`, along the size-C dimension;
-// (2) Masks out the original implicitly-zero locations;
-// (3) Renormalizes the remaining elements.
+// Arguments:
+// input: 4-D tensor with shape
+// `[batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
+// depth]`. Note that the batch size of the input tensor must be divisible by
+// `block_size * block_size`.
+// crops: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
+// how many elements to crop from the intermediate result across the spatial
+// dimensions as follows:
//
-// Hence, the `SparseTensor` result has exactly the same non-zero indices and
-// shape.
+// crops = [[crop_top, crop_bottom], [crop_left, crop_right]]
//
-// Arguments:
-// sp_indices: 2-D. `NNZ x R` matrix with the indices of non-empty values in a
-// SparseTensor, in canonical ordering.
-// sp_values: 1-D. `NNZ` non-empty values corresponding to `sp_indices`.
-// sp_shape: 1-D. Shape of the input SparseTensor.
//
-// Returns 1-D. The `NNZ` values for the result `SparseTensor`.
-func SparseSoftmax(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output) (output tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "SparseSoftmax",
- Input: []tf.Input{
- sp_indices, sp_values, sp_shape,
- },
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// Computes gradients for SparseSegmentMean.
+// Returns 4-D with shape `[batch, height, width, depth]`, where:
//
-// Returns tensor "output" with the same shape as grad, except for dimension 0 whose
-// value is output_dim0.
+// height = height_pad - crop_top - crop_bottom
+// width = width_pad - crop_left - crop_right
//
-// Arguments:
-// grad: gradient propagated to the SparseSegmentMean op.
-// indices: indices passed to the corresponding SparseSegmentMean op.
-// segment_ids: segment_ids passed to the corresponding SparseSegmentMean op.
-// output_dim0: dimension 0 of "data" passed to SparseSegmentMean op.
-func SparseSegmentMeanGrad(scope *Scope, grad tf.Output, indices tf.Output, segment_ids tf.Output, output_dim0 tf.Output) (output tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "SparseSegmentMeanGrad",
- Input: []tf.Input{
- grad, indices, segment_ids, output_dim0,
- },
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// Applies sparse addition to `input` using individual values or slices
+// The attr `block_size` must be greater than one. It indicates the block size.
//
-// from `updates` according to indices `indices`. The updates are non-aliasing:
-// `input` is only modified in-place if no other operations will use it.
-// Otherwise, a copy of `input` is made. This operation has a gradient with
-// respect to both `input` and `updates`.
+// Some examples:
//
-// `input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
+// (1) For the following input of shape `[4, 1, 1, 1]` and block_size of 2:
//
-// `indices` must be an integer tensor, containing indices into `input`.
-// It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
+// ```
+// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
+// ```
//
-// The innermost dimension of `indices` (with length `K`) corresponds to
-// indices into elements (if `K = P`) or `(P-K)`-dimensional slices
-// (if `K < P`) along the `K`th dimension of `input`.
+// The output tensor has shape `[1, 2, 2, 1]` and value:
//
-// `updates` is a `Tensor` of rank `Q-1+P-K` with shape:
+// ```
+// x = [[[[1], [2]], [[3], [4]]]]
+// ```
+//
+// (2) For the following input of shape `[4, 1, 1, 3]` and block_size of 2:
//
// ```
-// [d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].
+// [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
// ```
//
-// For example, say we want to add 4 scattered elements to a rank-1 tensor with
-// 8 elements. In Python, that addition would look like this:
+// The output tensor has shape `[1, 2, 2, 3]` and value:
//
-// input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8])
-// indices = tf.constant([[4], [3], [1], [7]])
-// updates = tf.constant([9, 10, 11, 12])
-// output = tf.scatter_nd_non_aliasing_add(input, indices, updates)
-// with tf.Session() as sess:
-// print(sess.run(output))
+// ```
+// x = [[[[1, 2, 3], [4, 5, 6]],
+// [[7, 8, 9], [10, 11, 12]]]]
+// ```
//
-// The resulting value `output` would look like this:
+// (3) For the following input of shape `[4, 2, 2, 1]` and block_size of 2:
//
-// [1, 13, 3, 14, 14, 6, 7, 20]
+// ```
+// x = [[[[1], [3]], [[9], [11]]],
+// [[[2], [4]], [[10], [12]]],
+// [[[5], [7]], [[13], [15]]],
+// [[[6], [8]], [[14], [16]]]]
+// ```
//
-// See @{tf.scatter_nd} for more details about how to make updates to slices.
+// The output tensor has shape `[1, 4, 4, 1]` and value:
//
-// Arguments:
-// input: A Tensor.
-// indices: A Tensor. Must be one of the following types: `int32`, `int64`.
-// A tensor of indices into `input`.
-// updates: A Tensor. Must have the same type as ref. A tensor of updated values
-// to add to `input`.
+// ```
+// x = [[[1], [2], [3], [4]],
+// [[5], [6], [7], [8]],
+// [[9], [10], [11], [12]],
+// [[13], [14], [15], [16]]]
+// ```
//
-// Returns A `Tensor` with the same shape as `input`, containing values of `input`
-// updated with `updates`.
-func ScatterNdNonAliasingAdd(scope *Scope, input tf.Output, indices tf.Output, updates tf.Output) (output tf.Output) {
+// (4) For the following input of shape `[8, 1, 2, 1]` and block_size of 2:
+//
+// ```
+// x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
+// [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
+// ```
+//
+// The output tensor has shape `[2, 2, 4, 1]` and value:
+//
+// ```
+// x = [[[[1], [3]], [[5], [7]]],
+// [[[2], [4]], [[10], [12]]],
+// [[[5], [7]], [[13], [15]]],
+// [[[6], [8]], [[14], [16]]]]
+// ```
+func BatchToSpace(scope *Scope, input tf.Output, crops tf.Output, block_size int64) (output tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"block_size": block_size}
opspec := tf.OpSpec{
- Type: "ScatterNdNonAliasingAdd",
+ Type: "BatchToSpace",
Input: []tf.Input{
- input, indices, updates,
+ input, crops,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
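As a concrete check of example (1) above, a sketch (same `tf`/`op` imports as the earlier example; names are illustrative):

```
// batchToSpaceSketch rearranges a [4, 1, 1, 1] batch into one [1, 2, 2, 1]
// image with block_size 2 and no cropping.
func batchToSpaceSketch() (tf.Output, error) {
	s := op.NewScope()
	input := op.Const(s, [][][][]float32{{{{1}}}, {{{2}}}, {{{3}}}, {{{4}}}})
	crops := op.Const(s, [][]int32{{0, 0}, {0, 0}})
	return op.BatchToSpace(s, input, crops, 2), s.Err()
}
```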
-// QuantizedReluXAttr is an optional argument to QuantizedReluX.
-type QuantizedReluXAttr func(optionalAttr)
-
-// QuantizedReluXOutType sets the optional out_type attribute to value.
-// If not specified, defaults to DT_QUINT8
-func QuantizedReluXOutType(value tf.DataType) QuantizedReluXAttr {
- return func(m optionalAttr) {
- m["out_type"] = value
- }
-}
-
-// Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)`
-//
-// Arguments:
-//
+// Makes a new iterator from the given `dataset` and stores it in `iterator`.
//
-// min_features: The float value that the lowest quantized value represents.
-// max_features: The float value that the highest quantized value represents.
+// This operation may be executed multiple times. Each execution will reset the
+// iterator in `iterator` to the first element of `dataset`.
//
-// Returns Has the same output shape as "features". The float value that the lowest quantized value represents. The float value that the highest quantized value represents.
-func QuantizedReluX(scope *Scope, features tf.Output, max_value tf.Output, min_features tf.Output, max_features tf.Output, optional ...QuantizedReluXAttr) (activations tf.Output, min_activations tf.Output, max_activations tf.Output) {
+// Returns the created operation.
+func MakeIterator(scope *Scope, dataset tf.Output, iterator tf.Output) (o *tf.Operation) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "QuantizedReluX",
+ Type: "MakeIterator",
Input: []tf.Input{
- features, max_value, min_features, max_features,
+ dataset, iterator,
},
- Attrs: attrs,
- }
- op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
-}
-
-// UnpackAttr is an optional argument to Unpack.
-type UnpackAttr func(optionalAttr)
-
-// UnpackAxis sets the optional axis attribute to value.
-//
-// value: Dimension along which to unpack. Negative values wrap around, so the
-// valid range is `[-R, R)`.
-// If not specified, defaults to 0
-func UnpackAxis(value int64) UnpackAttr {
- return func(m optionalAttr) {
- m["axis"] = value
}
+ return scope.AddOperation(opspec)
}
-// Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors.
-//
-// Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
-// For example, given a tensor of shape `(A, B, C, D)`;
+// Adjust the contrast of one or more images.
//
-// If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]`
-// and each tensor in `output` will have shape `(B, C, D)`. (Note that the
-// dimension unpacked along is gone, unlike `split`).
+// `images` is a tensor of at least 3 dimensions. The last 3 dimensions are
+// interpreted as `[height, width, channels]`. The other dimensions only
+// represent a collection of images, such as `[batch, height, width, channels]`.
//
-// If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]`
-// and each tensor in `output` will have shape `(A, C, D)`.
-// Etc.
+// Contrast is adjusted independently for each channel of each image.
//
-// This is the opposite of `pack`.
+// For each channel, the Op first computes the mean of the image pixels in the
+// channel and then adjusts each component of each pixel to
+// `(x - mean) * contrast_factor + mean`.
//
// Arguments:
-// value: 1-D or higher, with `axis` dimension size equal to `num`.
-//
+// images: Images to adjust. At least 3-D.
+// contrast_factor: A float multiplier for adjusting contrast.
//
-// Returns The list of tensors unpacked from `value`.
-func Unpack(scope *Scope, value tf.Output, num int64, optional ...UnpackAttr) (output []tf.Output) {
+// Returns The contrast-adjusted image or images.
+func AdjustContrastv2(scope *Scope, images tf.Output, contrast_factor tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"num": num}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "Unpack",
+ Type: "AdjustContrastv2",
Input: []tf.Input{
- value,
+ images, contrast_factor,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
- if scope.Err() != nil {
- return
- }
- var idx int
- var err error
- if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
- scope.UpdateErr("Unpack", err)
- return
- }
- return output
+ return op.Output(0)
}
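A brief sketch of the call (illustrative names; `images` is any at-least-3-D float tensor, here assumed to be in `[batch, height, width, channels]` layout):

```
// adjustContrastSketch scales contrast per channel around each channel's
// mean: (x - mean) * 1.5 + mean.
func adjustContrastSketch(s *op.Scope, images tf.Output) tf.Output {
	factor := op.Const(s, float32(1.5)) // >1 increases contrast, <1 reduces it
	return op.AdjustContrastv2(s, images, factor)
}
```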
-// Split a `SparseTensor` into `num_split` tensors along one dimension.
-//
-// If `shape[split_dim]` is not an integer multiple of `num_split`, slices
-// `[0 : shape[split_dim] % num_split]` get one extra dimension.
-// For example, if `split_dim = 1` and `num_split = 2` and the input is
-//
-// input_tensor = shape = [2, 7]
-// [ a d e ]
-// [b c ]
-//
-// Graphically the output tensors are:
-//
-// output_tensor[0] = shape = [2, 4]
-// [ a ]
-// [b c ]
-//
-// output_tensor[1] = shape = [2, 3]
-// [ d e ]
-// [ ]
-//
-// Arguments:
-// split_dim: 0-D. The dimension along which to split. Must be in the range
-// `[0, rank(shape))`.
-// indices: 2-D tensor representing the indices of the sparse tensor.
-// values: 1-D tensor representing the values of the sparse tensor.
-// shape: 1-D tensor representing the shape of the sparse tensor.
-// output_indices: A list of 1-D tensors representing the indices of the output
-// sparse tensors.
-// num_split: The number of ways to split.
-//
-// Returns A list of 1-D tensors representing the values of the output sparse
-// tensors. A list of 1-D tensors representing the shape of the output sparse
-// tensors.
-func SparseSplit(scope *Scope, split_dim tf.Output, indices tf.Output, values tf.Output, shape tf.Output, num_split int64) (output_indices []tf.Output, output_values []tf.Output, output_shape []tf.Output) {
+// Gets the next output from the given iterator.
+func IteratorGetNext(scope *Scope, iterator tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"num_split": num_split}
+ attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
opspec := tf.OpSpec{
- Type: "SparseSplit",
+ Type: "IteratorGetNext",
Input: []tf.Input{
- split_dim, indices, values, shape,
+ iterator,
},
Attrs: attrs,
}
@@ -20675,140 +20870,81 @@ func SparseSplit(scope *Scope, split_dim tf.Output, indices tf.Output, values tf
}
var idx int
var err error
- if output_indices, idx, err = makeOutputList(op, idx, "output_indices"); err != nil {
- scope.UpdateErr("SparseSplit", err)
- return
- }
- if output_values, idx, err = makeOutputList(op, idx, "output_values"); err != nil {
- scope.UpdateErr("SparseSplit", err)
- return
- }
- if output_shape, idx, err = makeOutputList(op, idx, "output_shape"); err != nil {
- scope.UpdateErr("SparseSplit", err)
+ if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
+ scope.UpdateErr("IteratorGetNext", err)
return
}
- return output_indices, output_values, output_shape
-}
-
-// ReduceJoinAttr is an optional argument to ReduceJoin.
-type ReduceJoinAttr func(optionalAttr)
-
-// ReduceJoinKeepDims sets the optional keep_dims attribute to value.
-//
-// value: If `True`, retain reduced dimensions with length `1`.
-// If not specified, defaults to false
-func ReduceJoinKeepDims(value bool) ReduceJoinAttr {
- return func(m optionalAttr) {
- m["keep_dims"] = value
- }
-}
-
-// ReduceJoinSeparator sets the optional separator attribute to value.
-//
-// value: The separator to use when joining.
-// If not specified, defaults to ""
-func ReduceJoinSeparator(value string) ReduceJoinAttr {
- return func(m optionalAttr) {
- m["separator"] = value
- }
+ return components
}
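MakeIterator (above) and IteratorGetNext are typically used together. A sketch, assuming a `dataset` handle built elsewhere and the `Iterator` wrapper defined in another part of this file; the element types and shapes are illustrative:

```
// iteratorSketch wires `dataset` to an iterator resource. Run `init` once to
// position the iterator (and again to rewind), then fetch `next` repeatedly;
// each Session.Run of `next` yields one element's components.
func iteratorSketch(s *op.Scope, dataset tf.Output) (next []tf.Output, init *tf.Operation) {
	types := []tf.DataType{tf.Int64}
	shapes := []tf.Shape{tf.ScalarShape()}
	it := op.Iterator(s, "", "", types, shapes)
	init = op.MakeIterator(s, dataset, it)
	next = op.IteratorGetNext(s, it, types, shapes)
	return next, init
}
```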
-// Joins a string Tensor across the given dimensions.
+// Outputs the single element from the given dataset.
//
-// Computes the string join across dimensions in the given string Tensor of shape
-// `[d_0, d_1, ..., d_n-1]`. Returns a new Tensor created by joining the input
-// strings with the given separator (default: empty string). Negative indices are
-// counted backwards from the end, with `-1` being equivalent to `n - 1`.
+// Arguments:
+// dataset: A handle to a dataset that contains a single element.
//
-// For example:
//
-// ```python
-// # tensor `a` is [["a", "b"], ["c", "d"]]
-// tf.reduce_join(a, 0) ==> ["ac", "bd"]
-// tf.reduce_join(a, 1) ==> ["ab", "cd"]
-// tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"]
-// tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"]
-// tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]]
-// tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]]
-// tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"]
-// tf.reduce_join(a, [0, 1]) ==> ["acbd"]
-// tf.reduce_join(a, [1, 0]) ==> ["abcd"]
-// tf.reduce_join(a, []) ==> ["abcd"]
-// ```
-//
-// Arguments:
-// inputs: The input to be joined. All reduced indices must have non-zero size.
-// reduction_indices: The dimensions to reduce over. Dimensions are reduced in the
-// order specified. Omitting `reduction_indices` is equivalent to passing
-// `[n-1, n-2, ..., 0]`. Negative indices from `-n` to `-1` are supported.
//
-// Returns Has shape equal to that of the input with reduced dimensions removed or
-// set to `1` depending on `keep_dims`.
-func ReduceJoin(scope *Scope, inputs tf.Output, reduction_indices tf.Output, optional ...ReduceJoinAttr) (output tf.Output) {
+// Returns The components of the single element of `input`.
+func DatasetToSingleElement(scope *Scope, dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
+ attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
opspec := tf.OpSpec{
- Type: "ReduceJoin",
+ Type: "DatasetToSingleElement",
Input: []tf.Input{
- inputs, reduction_indices,
+ dataset,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ if scope.Err() != nil {
+ return
+ }
+ var idx int
+ var err error
+ if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
+ scope.UpdateErr("DatasetToSingleElement", err)
+ return
+ }
+ return components
}
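A sketch of the no-iterator path (illustrative types; the dataset must hold exactly one element or the op fails at run time):

```
// singleElementSketch pulls the components of a one-element dataset directly,
// with no iterator or initializer in between.
func singleElementSketch(s *op.Scope, dataset tf.Output) []tf.Output {
	return op.DatasetToSingleElement(s, dataset,
		[]tf.DataType{tf.Float}, []tf.Shape{tf.ScalarShape()})
}
```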
-// Computes element-wise population count (a.k.a. popcount, bitsum, bitcount).
+// Converts the given `resource_handle` representing an iterator to a string.
//
-// For each entry in `x`, calculates the number of `1` (on) bits in the binary
-// representation of that entry.
+// Arguments:
+// resource_handle: A handle to an iterator resource.
//
-// **NOTE**: It is more efficient to first `tf.bitcast` your tensors into
-// `int32` or `int64` and perform the bitcount on the result, than to feed in
-// 8- or 16-bit inputs and then aggregate the resulting counts.
-func PopulationCount(scope *Scope, x tf.Output) (y tf.Output) {
+// Returns A string representation of the given handle.
+func IteratorToStringHandle(scope *Scope, resource_handle tf.Output) (string_handle tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "PopulationCount",
+ Type: "IteratorToStringHandle",
Input: []tf.Input{
- x,
+ resource_handle,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// AssertAttr is an optional argument to Assert.
-type AssertAttr func(optionalAttr)
+// ShapeNAttr is an optional argument to ShapeN.
+type ShapeNAttr func(optionalAttr)
-// AssertSummarize sets the optional summarize attribute to value.
-//
-// value: Print this many entries of each tensor.
-// If not specified, defaults to 3
-func AssertSummarize(value int64) AssertAttr {
+// ShapeNOutType sets the optional out_type attribute to value.
+// If not specified, defaults to DT_INT32
+func ShapeNOutType(value tf.DataType) ShapeNAttr {
return func(m optionalAttr) {
- m["summarize"] = value
+ m["out_type"] = value
}
}
-// Asserts that the given condition is true.
-//
-// If `condition` evaluates to false, print the list of tensors in `data`.
-// `summarize` determines how many entries of the tensors to print.
-//
-// Arguments:
-// condition: The condition to evaluate.
-// data: The tensors to print out when condition is false.
+// Returns shape of tensors.
//
-// Returns the created operation.
-func Assert(scope *Scope, condition tf.Output, data []tf.Output, optional ...AssertAttr) (o *tf.Operation) {
+// This operation returns N 1-D integer tensors representing the shape of each `input[i]`.
+func ShapeN(scope *Scope, input []tf.Output, optional ...ShapeNAttr) (output []tf.Output) {
if scope.Err() != nil {
return
}
@@ -20817,148 +20953,61 @@ func Assert(scope *Scope, condition tf.Output, data []tf.Output, optional ...Ass
a(attrs)
}
opspec := tf.OpSpec{
- Type: "Assert",
+ Type: "ShapeN",
Input: []tf.Input{
- condition, tf.OutputList(data),
+ tf.OutputList(input),
},
Attrs: attrs,
}
- return scope.AddOperation(opspec)
-}
-
-// RandomUniformAttr is an optional argument to RandomUniform.
-type RandomUniformAttr func(optionalAttr)
-
-// RandomUniformSeed sets the optional seed attribute to value.
-//
-// value: If either `seed` or `seed2` are set to be non-zero, the random number
-// generator is seeded by the given seed. Otherwise, it is seeded by a
-// random seed.
-// If not specified, defaults to 0
-func RandomUniformSeed(value int64) RandomUniformAttr {
- return func(m optionalAttr) {
- m["seed"] = value
- }
-}
-
-// RandomUniformSeed2 sets the optional seed2 attribute to value.
-//
-// value: A second seed to avoid seed collision.
-// If not specified, defaults to 0
-func RandomUniformSeed2(value int64) RandomUniformAttr {
- return func(m optionalAttr) {
- m["seed2"] = value
- }
-}
-
-// Outputs random values from a uniform distribution.
-//
-// The generated values follow a uniform distribution in the range `[0, 1)`. The
-// lower bound 0 is included in the range, while the upper bound 1 is excluded.
-//
-// Arguments:
-// shape: The shape of the output tensor.
-// dtype: The type of the output.
-//
-// Returns A tensor of the specified shape filled with uniform random values.
-func RandomUniform(scope *Scope, shape tf.Output, dtype tf.DataType, optional ...RandomUniformAttr) (output tf.Output) {
+ op := scope.AddOperation(opspec)
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dtype": dtype}
- for _, a := range optional {
- a(attrs)
- }
- opspec := tf.OpSpec{
- Type: "RandomUniform",
- Input: []tf.Input{
- shape,
- },
- Attrs: attrs,
+ var idx int
+ var err error
+ if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
+ scope.UpdateErr("ShapeN", err)
+ return
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return output
}
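A sketch of the batched shape query (illustrative constants):

```
// shapeNSketch returns one shape vector per input; ShapeNOutType switches the
// result dtype from the default int32 to int64.
func shapeNSketch() ([]tf.Output, error) {
	s := op.NewScope()
	a := op.Const(s, [][]float32{{1, 2}, {3, 4}})
	b := op.Const(s, []float32{1, 2, 3})
	return op.ShapeN(s, []tf.Output{a, b}, op.ShapeNOutType(tf.Int64)), s.Err()
}
```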
-// ResourceApplyFtrlAttr is an optional argument to ResourceApplyFtrl.
-type ResourceApplyFtrlAttr func(optionalAttr)
+// IteratorFromStringHandleAttr is an optional argument to IteratorFromStringHandle.
+type IteratorFromStringHandleAttr func(optionalAttr)
-// ResourceApplyFtrlUseLocking sets the optional use_locking attribute to value.
+// IteratorFromStringHandleOutputTypes sets the optional output_types attribute to value.
//
-// value: If `True`, updating of the var and accum tensors will be protected
-// by a lock; otherwise the behavior is undefined, but may exhibit less
-// contention.
-// If not specified, defaults to false
-func ResourceApplyFtrlUseLocking(value bool) ResourceApplyFtrlAttr {
+// value: If specified, defines the type of each tuple component in an
+// element produced by the resulting iterator.
+// If not specified, defaults to <>
+//
+// REQUIRES: len(value) >= 0
+func IteratorFromStringHandleOutputTypes(value []tf.DataType) IteratorFromStringHandleAttr {
return func(m optionalAttr) {
- m["use_locking"] = value
+ m["output_types"] = value
}
}
-// Update '*var' according to the Ftrl-proximal scheme.
-//
-// accum_new = accum + grad * grad
-// linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
-// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
-// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
-// accum = accum_new
-//
-// Arguments:
-// var_: Should be from a Variable().
-// accum: Should be from a Variable().
-// linear: Should be from a Variable().
-// grad: The gradient.
-// lr: Scaling factor. Must be a scalar.
-// l1: L1 regularization. Must be a scalar.
-// l2: L2 regularization. Must be a scalar.
-// lr_power: Scaling factor. Must be a scalar.
+// IteratorFromStringHandleOutputShapes sets the optional output_shapes attribute to value.
//
-// Returns the created operation.
-func ResourceApplyFtrl(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, lr_power tf.Output, optional ...ResourceApplyFtrlAttr) (o *tf.Operation) {
- if scope.Err() != nil {
- return
- }
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
- opspec := tf.OpSpec{
- Type: "ResourceApplyFtrl",
- Input: []tf.Input{
- var_, accum, linear, grad, lr, l1, l2, lr_power,
- },
- Attrs: attrs,
- }
- return scope.AddOperation(opspec)
-}
-
-// AnyAttr is an optional argument to Any.
-type AnyAttr func(optionalAttr)
-
-// AnyKeepDims sets the optional keep_dims attribute to value.
+// value: If specified, defines the shape of each tuple component in an
+// element produced by the resulting iterator.
+// If not specified, defaults to <>
//
-// value: If true, retain reduced dimensions with length 1.
-// If not specified, defaults to false
-func AnyKeepDims(value bool) AnyAttr {
+// REQUIRES: len(value) >= 0
+func IteratorFromStringHandleOutputShapes(value []tf.Shape) IteratorFromStringHandleAttr {
return func(m optionalAttr) {
- m["keep_dims"] = value
+ m["output_shapes"] = value
}
}
-// Computes the "logical or" of elements across dimensions of a tensor.
-//
-// Reduces `input` along the dimensions given in `axis`. Unless
-// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
-// `axis`. If `keep_dims` is true, the reduced dimensions are
-// retained with length 1.
+// Converts the given string representing a handle to an iterator to a resource.
//
// Arguments:
-// input: The tensor to reduce.
-// axis: The dimensions to reduce. Must be in the range
-// `[-rank(input), rank(input))`.
+// string_handle: A string representation of the given handle.
//
-// Returns The reduced tensor.
-func Any(scope *Scope, input tf.Output, axis tf.Output, optional ...AnyAttr) (output tf.Output) {
+// Returns A handle to an iterator resource.
+func IteratorFromStringHandle(scope *Scope, string_handle tf.Output, optional ...IteratorFromStringHandleAttr) (resource_handle tf.Output) {
if scope.Err() != nil {
return
}
@@ -20967,9 +21016,9 @@ func Any(scope *Scope, input tf.Output, axis tf.Output, optional ...AnyAttr) (ou
a(attrs)
}
opspec := tf.OpSpec{
- Type: "Any",
+ Type: "IteratorFromStringHandle",
Input: []tf.Input{
- input, axis,
+ string_handle,
},
Attrs: attrs,
}
@@ -20977,401 +21026,232 @@ func Any(scope *Scope, input tf.Output, axis tf.Output, optional ...AnyAttr) (ou
return op.Output(0)
}
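IteratorToStringHandle (above) and this op form a round trip. A sketch, with an illustrative element type; `it` is an existing iterator resource handle:

```
// handleRoundTripSketch serializes an iterator resource to a string and then
// recovers a resource handle from it, constraining the element types the
// restored iterator is expected to produce.
func handleRoundTripSketch(s *op.Scope, it tf.Output) tf.Output {
	str := op.IteratorToStringHandle(s, it)
	return op.IteratorFromStringHandle(s, str,
		op.IteratorFromStringHandleOutputTypes([]tf.DataType{tf.Int64}))
}
```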
-// Compute the Hurwitz zeta function \\(\zeta(x, q)\\).
-//
-// The Hurwitz zeta function is defined as:
-//
-//
-// \\(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\\)
-func Zeta(scope *Scope, x tf.Output, q tf.Output) (z tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "Zeta",
- Input: []tf.Input{
- x, q,
- },
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// Creates a dataset that skips `count` elements from the `input_dataset`.
-//
-// Arguments:
-//
-// count: A scalar representing the number of elements from the `input_dataset`
-// that should be skipped. If count is -1, skips everything.
-//
+// Computes arctangent of `y/x` element-wise, respecting signs of the arguments.
//
-func SkipDataset(scope *Scope, input_dataset tf.Output, count tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
+// This is the angle \( \theta \in [-\pi, \pi] \) such that
+// \[ x = r \cos(\theta) \]
+// and
+// \[ y = r \sin(\theta) \]
+// where \(r = \sqrt{x^2 + y^2}\).
+func Atan2(scope *Scope, y tf.Output, x tf.Output) (z tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
opspec := tf.OpSpec{
- Type: "SkipDataset",
+ Type: "Atan2",
Input: []tf.Input{
- input_dataset, count,
+ y, x,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
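A quick numeric sketch (illustrative values):

```
// atan2Sketch computes elementwise atan2(y, x); for these inputs the result
// is approximately [pi/4, -pi/4].
func atan2Sketch() (tf.Output, error) {
	s := op.NewScope()
	y := op.Const(s, []float32{1, -1})
	x := op.Const(s, []float32{1, 1})
	return op.Atan2(s, y, x), s.Err()
}
```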
-// ImagAttr is an optional argument to Imag.
-type ImagAttr func(optionalAttr)
-
-// ImagTout sets the optional Tout attribute to value.
-// If not specified, defaults to DT_FLOAT
-func ImagTout(value tf.DataType) ImagAttr {
- return func(m optionalAttr) {
- m["Tout"] = value
- }
-}
-
-// Returns the imaginary part of a complex number.
-//
-// Given a tensor `input` of complex numbers, this operation returns a tensor of
-// type `float` that is the imaginary part of each element in `input`. All
-// elements in `input` must be complex numbers of the form \\(a + bj\\), where *a*
-// is the real part and *b* is the imaginary part returned by this operation.
-//
-// For example:
-//
-// ```
-// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
-// tf.imag(input) ==> [4.75, 5.75]
-// ```
-func Imag(scope *Scope, input tf.Output, optional ...ImagAttr) (output tf.Output) {
+// Return a tensor with the same shape and contents as the input tensor or value.
+func Identity(scope *Scope, input tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "Imag",
+ Type: "Identity",
Input: []tf.Input{
input,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// ComplexAttr is an optional argument to Complex.
-type ComplexAttr func(optionalAttr)
-
-// ComplexTout sets the optional Tout attribute to value.
-// If not specified, defaults to DT_COMPLEX64
-func ComplexTout(value tf.DataType) ComplexAttr {
- return func(m optionalAttr) {
- m["Tout"] = value
- }
-}
-
-// Converts two real numbers to a complex number.
+// Gather slices from `params` axis `axis` according to `indices`.
//
-// Given a tensor `real` representing the real part of a complex number, and a
-// tensor `imag` representing the imaginary part of a complex number, this
-// operation returns complex numbers elementwise of the form \\(a + bj\\), where
-// *a* represents the `real` part and *b* represents the `imag` part.
+// `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
+// Produces an output tensor with shape `params.shape[:axis] + indices.shape +
+// params.shape[axis + 1:]` where:
//
-// The input tensors `real` and `imag` must have the same shape.
+// ```python
+// # Scalar indices (output is rank(params) - 1).
+// output[a_0, ..., a_n, b_0, ..., b_n] =
+// params[a_0, ..., a_n, indices, b_0, ..., b_n]
//
-// For example:
+// # Vector indices (output is rank(params)).
+// output[a_0, ..., a_n, i, b_0, ..., b_n] =
+// params[a_0, ..., a_n, indices[i], b_0, ..., b_n]
//
+// # Higher rank indices (output is rank(params) + rank(indices) - 1).
+// output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =
+// params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
// ```
-// # tensor 'real' is [2.25, 3.25]
-// # tensor `imag` is [4.75, 5.75]
-// tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]]
-// ```
-func Complex(scope *Scope, real tf.Output, imag tf.Output, optional ...ComplexAttr) (out tf.Output) {
- if scope.Err() != nil {
- return
- }
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
- opspec := tf.OpSpec{
- Type: "Complex",
- Input: []tf.Input{
- real, imag,
- },
- Attrs: attrs,
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// Inverse real-valued fast Fourier transform.
-//
-// Computes the inverse 1-dimensional discrete Fourier transform of a real-valued
-// signal over the inner-most dimension of `input`.
-//
-// The inner-most dimension of `input` is assumed to be the result of `RFFT`: the
-// `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If
-// `fft_length` is not provided, it is computed from the size of the inner-most
-// dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to
-// compute `input` is odd, it should be provided since it cannot be inferred
-// properly.
-//
-// Along the axis `IRFFT` is computed on, if `fft_length / 2 + 1` is smaller
-// than the corresponding dimension of `input`, the dimension is cropped. If it is
-// larger, the dimension is padded with zeros.
-//
-// Arguments:
-// input: A complex64 tensor.
-// fft_length: An int32 tensor of shape [1]. The FFT length.
-//
-// Returns A float32 tensor of the same rank as `input`. The inner-most
-// dimension of `input` is replaced with the `fft_length` samples of its inverse
-// 1D Fourier transform.
//
-// @compatibility(numpy)
-// Equivalent to np.fft.irfft
-// @end_compatibility
-func IRFFT(scope *Scope, input tf.Output, fft_length tf.Output) (output tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "IRFFT",
- Input: []tf.Input{
- input, fft_length,
- },
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// Adds a value to the current value of a variable.
-//
-// Any ReadVariableOp which depends directly or indirectly on this assign is
-// guaranteed to see the incremented value or a subsequent newer one.
-//
-// Outputs the incremented value, which can be used to totally order the
-// increments to this variable.
+// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+// <img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt>
+// </div>
//
// Arguments:
-// resource: handle to the resource in which to store the variable.
-// value: the value by which the variable will be incremented.
+// params: The tensor from which to gather values. Must be at least rank
+// `axis + 1`.
+// indices: Index tensor. Must be in range `[0, params.shape[axis])`.
+// axis: The axis in `params` to gather `indices` from. Defaults to the first
+// dimension. Supports negative indexes.
//
-// Returns the created operation.
-func AssignAddVariableOp(scope *Scope, resource tf.Output, value tf.Output) (o *tf.Operation) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "AssignAddVariableOp",
- Input: []tf.Input{
- resource, value,
- },
- }
- return scope.AddOperation(opspec)
-}
-
-// Computes inverse hyperbolic sine of x element-wise.
-func Asinh(scope *Scope, x tf.Output) (y tf.Output) {
+// Returns Values from `params` gathered from indices given by `indices`, with
+// shape `params.shape[:axis] + indices.shape + params.shape[axis + 1:]`.
+func GatherV2(scope *Scope, params tf.Output, indices tf.Output, axis tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Asinh",
+ Type: "GatherV2",
Input: []tf.Input{
- x,
+ params, indices, axis,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
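A sketch of the vector-indices case from the comment (illustrative values):

```
// gatherSketch gathers columns 2 and 0 from each row (axis 1), producing
// [[3, 1], [6, 4]]; the output shape is params.shape[:1] + indices.shape.
func gatherSketch() (tf.Output, error) {
	s := op.NewScope()
	params := op.Const(s, [][]float32{{1, 2, 3}, {4, 5, 6}})
	indices := op.Const(s, []int32{2, 0})
	axis := op.Const(s, int32(1))
	return op.GatherV2(s, params, indices, axis), s.Err()
}
```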
-// Real-valued fast Fourier transform.
-//
-// Computes the 1-dimensional discrete Fourier transform of a real-valued signal
-// over the inner-most dimension of `input`.
-//
-// Since the DFT of a real signal is Hermitian-symmetric, `RFFT` only returns the
-// `fft_length / 2 + 1` unique components of the FFT: the zero-frequency term,
-// followed by the `fft_length / 2` positive-frequency terms.
-//
-// Along the axis `RFFT` is computed on, if `fft_length` is smaller than the
-// corresponding dimension of `input`, the dimension is cropped. If it is larger,
-// the dimension is padded with zeros.
+// Converts the given `resource_handle` representing an iterator to a variant tensor.
//
// Arguments:
-// input: A float32 tensor.
-// fft_length: An int32 tensor of shape [1]. The FFT length.
-//
-// Returns A complex64 tensor of the same rank as `input`. The inner-most
-// dimension of `input` is replaced with the `fft_length / 2 + 1` unique
-// frequency components of its 1D Fourier transform.
+// resource_handle: A handle to an iterator resource.
//
-// @compatibility(numpy)
-// Equivalent to np.fft.rfft
-// @end_compatibility
-func RFFT(scope *Scope, input tf.Output, fft_length tf.Output) (output tf.Output) {
+// Returns A variant tensor storing the state of the iterator contained in the
+// resource.
+func SerializeIterator(scope *Scope, resource_handle tf.Output) (serialized tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "RFFT",
+ Type: "SerializeIterator",
Input: []tf.Input{
- input, fft_length,
+ resource_handle,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// OrderedMapStageAttr is an optional argument to OrderedMapStage.
-type OrderedMapStageAttr func(optionalAttr)
+// FIFOQueueV2Attr is an optional argument to FIFOQueueV2.
+type FIFOQueueV2Attr func(optionalAttr)
-// OrderedMapStageCapacity sets the optional capacity attribute to value.
+// FIFOQueueV2Shapes sets the optional shapes attribute to value.
//
-// value: Maximum number of elements in the Staging Area. If > 0, inserts
-// on the container will block when the capacity is reached.
-// If not specified, defaults to 0
+// value: The shape of each component in a value. The length of this attr must
+// be either 0 or the same as the length of component_types. If the length of
+// this attr is 0, the shapes of queue elements are not constrained, and
+// only one element may be dequeued at a time.
+// If not specified, defaults to <>
//
-// REQUIRES: value >= 0
-func OrderedMapStageCapacity(value int64) OrderedMapStageAttr {
+// REQUIRES: len(value) >= 0
+func FIFOQueueV2Shapes(value []tf.Shape) FIFOQueueV2Attr {
return func(m optionalAttr) {
- m["capacity"] = value
+ m["shapes"] = value
}
}
-// OrderedMapStageMemoryLimit sets the optional memory_limit attribute to value.
-// If not specified, defaults to 0
+// FIFOQueueV2Capacity sets the optional capacity attribute to value.
//
-// REQUIRES: value >= 0
-func OrderedMapStageMemoryLimit(value int64) OrderedMapStageAttr {
+// value: The upper bound on the number of elements in this queue.
+// Negative numbers mean no limit.
+// If not specified, defaults to -1
+func FIFOQueueV2Capacity(value int64) FIFOQueueV2Attr {
return func(m optionalAttr) {
- m["memory_limit"] = value
+ m["capacity"] = value
}
}
-// OrderedMapStageContainer sets the optional container attribute to value.
+// FIFOQueueV2Container sets the optional container attribute to value.
//
-// value: If non-empty, this queue is placed in the given container. Otherwise,
-// a default container is used.
+// value: If non-empty, this queue is placed in the given container.
+// Otherwise, a default container is used.
// If not specified, defaults to ""
-func OrderedMapStageContainer(value string) OrderedMapStageAttr {
+func FIFOQueueV2Container(value string) FIFOQueueV2Attr {
return func(m optionalAttr) {
m["container"] = value
}
}
-// OrderedMapStageSharedName sets the optional shared_name attribute to value.
+// FIFOQueueV2SharedName sets the optional shared_name attribute to value.
//
-// value: It is necessary to match this name to the matching Unstage Op.
+// value: If non-empty, this queue will be shared under the given name
+// across multiple sessions.
// If not specified, defaults to ""
-func OrderedMapStageSharedName(value string) OrderedMapStageAttr {
+func FIFOQueueV2SharedName(value string) FIFOQueueV2Attr {
return func(m optionalAttr) {
m["shared_name"] = value
}
}
-// Stage (key, values) in the underlying container which behaves like an ordered
-//
-// associative container. Elements are ordered by key.
+// A queue that produces elements in first-in first-out order.
//
// Arguments:
-// key: int64
-//
-// values: A list of tensors.
-// dtypes: A list of data types that inserted values should adhere to.
-//
+// component_types: The type of each component in a value.
//
-// Returns the created operation.
-func OrderedMapStage(scope *Scope, key tf.Output, indices tf.Output, values []tf.Output, dtypes []tf.DataType, optional ...OrderedMapStageAttr) (o *tf.Operation) {
+// Returns The handle to the queue.
+func FIFOQueueV2(scope *Scope, component_types []tf.DataType, optional ...FIFOQueueV2Attr) (handle tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dtypes": dtypes}
+ attrs := map[string]interface{}{"component_types": component_types}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "OrderedMapStage",
- Input: []tf.Input{
- key, indices, tf.OutputList(values),
- },
- Attrs: attrs,
- }
- return scope.AddOperation(opspec)
-}
+ Type: "FIFOQueueV2",
-// Computes the gradient for the tanh of `x` wrt its input.
-//
-// Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and `dy`
-// is the corresponding input gradient.
-func TanhGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "TanhGrad",
- Input: []tf.Input{
- y, dy,
- },
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
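A sketch of constructing a shared, bounded queue (the shared name and capacity are illustrative):

```
// fifoQueueSketch creates a FIFO queue of scalar floats, capped at 32
// elements, that other sessions can look up under the shared name.
func fifoQueueSketch() (tf.Output, error) {
	s := op.NewScope()
	handle := op.FIFOQueueV2(s, []tf.DataType{tf.Float},
		op.FIFOQueueV2Capacity(32),
		op.FIFOQueueV2Shapes([]tf.Shape{tf.ScalarShape()}),
		op.FIFOQueueV2SharedName("example_queue"))
	return handle, s.Err()
}
```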
-// Outputs all keys and values in the table.
-//
-// Arguments:
-// table_handle: Handle to the table.
-//
-//
-//
-// Returns Vector of all keys present in the table. Tensor of all values in the table. Indexed in parallel with `keys`.
-func LookupTableExportV2(scope *Scope, table_handle tf.Output, Tkeys tf.DataType, Tvalues tf.DataType) (keys tf.Output, values tf.Output) {
+// Produces a summary of any statistics recorded by the given statistics manager.
+func StatsAggregatorSummary(scope *Scope, iterator tf.Output) (summary tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"Tkeys": Tkeys, "Tvalues": Tvalues}
opspec := tf.OpSpec{
- Type: "LookupTableExportV2",
+ Type: "StatsAggregatorSummary",
Input: []tf.Input{
- table_handle,
+ iterator,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1)
+ return op.Output(0)
}
-// Converts each string in the input Tensor to its hash mod by a number of buckets.
+// Performs a padding as a preprocess during a convolution.
//
-// The hash function is deterministic on the content of the string within the
-// process and will never change. However, it is not suitable for cryptography.
-// This function may be used when CPU time is scarce and inputs are trusted or
-// unimportant. There is a risk of adversaries constructing inputs that all hash
-// to the same bucket. To prevent this problem, use a strong hash function with
-// `tf.string_to_hash_bucket_strong`.
+// Similar to FusedResizeAndPadConv2D, this op allows for an optimized
+// implementation where the spatial padding transformation stage is fused with the
+// im2col lookup, but in this case without the bilinear filtering required for
+// resizing. Fusing the padding prevents the need to write out the intermediate
+// results as whole tensors, reducing memory pressure, and we can get some latency
+// gains by merging the transformation calculations.
+// The data_format attribute for Conv2D isn't supported by this op, and 'NHWC'
+// order is used instead.
+// Internally this op uses a single per-graph scratch buffer, which means that it
+// will block if multiple versions are being run in parallel. This is because this
+// operator is primarily an optimization to minimize memory usage.
//
// Arguments:
-// input: The strings to assign a hash bucket.
-// num_buckets: The number of buckets.
+// input: 4-D with shape `[batch, in_height, in_width, in_channels]`.
+// paddings: A two-column matrix specifying the padding sizes. The number of
+// rows must be the same as the rank of `input`.
+// filter: 4-D with shape
+// `[filter_height, filter_width, in_channels, out_channels]`.
//
-// Returns A Tensor of the same shape as the input `string_tensor`.
-func StringToHashBucketFast(scope *Scope, input tf.Output, num_buckets int64) (output tf.Output) {
+// strides: 1-D of length 4. The stride of the sliding window for each dimension
+// of `input`. Must be in the same order as the dimension specified with format.
+// padding: The type of padding algorithm to use.
+func FusedPadConv2D(scope *Scope, input tf.Output, paddings tf.Output, filter tf.Output, mode string, strides []int64, padding string) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"num_buckets": num_buckets}
+ attrs := map[string]interface{}{"mode": mode, "strides": strides, "padding": padding}
opspec := tf.OpSpec{
- Type: "StringToHashBucketFast",
+ Type: "FusedPadConv2D",
Input: []tf.Input{
- input,
+ input, paddings, filter,
},
Attrs: attrs,
}
@@ -21379,73 +21259,18 @@ func StringToHashBucketFast(scope *Scope, input tf.Output, num_buckets int64) (o
return op.Output(0)
}
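A sketch of the fused pad-then-convolve call; `input` and `filter` are assumed to be 4-D tensors built elsewhere, and the mode and stride choices are illustrative:

```
// fusedPadConvSketch reflects one pixel of padding onto each spatial edge of
// `input`, then convolves with `filter` at stride 1 in NHWC order.
func fusedPadConvSketch(s *op.Scope, input, filter tf.Output) tf.Output {
	paddings := op.Const(s, [][]int32{{0, 0}, {1, 1}, {1, 1}, {0, 0}})
	return op.FusedPadConv2D(s, input, paddings, filter,
		"REFLECT", []int64{1, 1, 1, 1}, "VALID")
}
```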
-// TensorArrayGatherV3Attr is an optional argument to TensorArrayGatherV3.
-type TensorArrayGatherV3Attr func(optionalAttr)
+// Conv2DBackpropInputAttr is an optional argument to Conv2DBackpropInput.
+type Conv2DBackpropInputAttr func(optionalAttr)
-// TensorArrayGatherV3ElementShape sets the optional element_shape attribute to value.
-//
-// value: The expected shape of an element, if known. Used to
-// validate the shapes of TensorArray elements. If this shape is not
-// fully specified, gathering zero-size TensorArrays is an error.
-// If not specified, defaults to <unknown_rank:true >
-func TensorArrayGatherV3ElementShape(value tf.Shape) TensorArrayGatherV3Attr {
+// Conv2DBackpropInputUseCudnnOnGpu sets the optional use_cudnn_on_gpu attribute to value.
+// If not specified, defaults to true
+func Conv2DBackpropInputUseCudnnOnGpu(value bool) Conv2DBackpropInputAttr {
return func(m optionalAttr) {
- m["element_shape"] = value
- }
-}
-
-// Gather specific elements from the TensorArray into output `value`.
-//
-// All elements selected by `indices` must have the same shape.
-//
-// Arguments:
-// handle: The handle to a TensorArray.
-// indices: The locations in the TensorArray from which to read tensor elements.
-// flow_in: A float scalar that enforces proper chaining of operations.
-// dtype: The type of the elem that is returned.
-//
-// Returns All of the elements in the TensorArray, concatenated along a new
-// axis (the new dimension 0).
-func TensorArrayGatherV3(scope *Scope, handle tf.Output, indices tf.Output, flow_in tf.Output, dtype tf.DataType, optional ...TensorArrayGatherV3Attr) (value tf.Output) {
- if scope.Err() != nil {
- return
- }
- attrs := map[string]interface{}{"dtype": dtype}
- for _, a := range optional {
- a(attrs)
- }
- opspec := tf.OpSpec{
- Type: "TensorArrayGatherV3",
- Input: []tf.Input{
- handle, indices, flow_in,
- },
- Attrs: attrs,
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// Deprecated. Disallowed in GraphDef version >= 2.
-//
-// DEPRECATED at GraphDef version 2: Use AdjustContrastv2 instead
-func AdjustContrast(scope *Scope, images tf.Output, contrast_factor tf.Output, min_value tf.Output, max_value tf.Output) (output tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "AdjustContrast",
- Input: []tf.Input{
- images, contrast_factor, min_value, max_value,
- },
+ m["use_cudnn_on_gpu"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// MaxPoolGradGradAttr is an optional argument to MaxPoolGradGrad.
-type MaxPoolGradGradAttr func(optionalAttr)
-
-// MaxPoolGradGradDataFormat sets the optional data_format attribute to value.
+// Conv2DBackpropInputDataFormat sets the optional data_format attribute to value.
//
// value: Specify the data format of the input and output data. With the
// default format "NHWC", the data is stored in the order of:
@@ -21453,101 +21278,13 @@ type MaxPoolGradGradAttr func(optionalAttr)
// Alternatively, the format could be "NCHW", the data storage order of:
// [batch, in_channels, in_height, in_width].
// If not specified, defaults to "NHWC"
-func MaxPoolGradGradDataFormat(value string) MaxPoolGradGradAttr {
- return func(m optionalAttr) {
- m["data_format"] = value
- }
-}
-
-// Computes second-order gradients of the maxpooling function.
-//
-// Arguments:
-// orig_input: The original input tensor.
-// orig_output: The original output tensor.
-// grad: 4-D. Gradients of gradients w.r.t. the input of `max_pool`.
-// ksize: The size of the window for each dimension of the input tensor.
-// strides: The stride of the sliding window for each dimension of the
-// input tensor.
-// padding: The type of padding algorithm to use.
-//
-// Returns Gradients of gradients w.r.t. the input to `max_pool`.
-func MaxPoolGradGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolGradGradAttr) (output tf.Output) {
- if scope.Err() != nil {
- return
- }
- attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
- for _, a := range optional {
- a(attrs)
- }
- opspec := tf.OpSpec{
- Type: "MaxPoolGradGrad",
- Input: []tf.Input{
- orig_input, orig_output, grad,
- },
- Attrs: attrs,
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// 3D real-valued fast Fourier transform.
-//
-// Computes the 3-dimensional discrete Fourier transform of a real-valued signal
-// over the inner-most 3 dimensions of `input`.
-//
-// Since the DFT of a real signal is Hermitian-symmetric, `RFFT3D` only returns the
-// `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension
-// of `output`: the zero-frequency term, followed by the `fft_length / 2`
-// positive-frequency terms.
-//
-// Along each axis `RFFT3D` is computed on, if `fft_length` is smaller than the
-// corresponding dimension of `input`, the dimension is cropped. If it is larger,
-// the dimension is padded with zeros.
-//
-// Arguments:
-// input: A float32 tensor.
-// fft_length: An int32 tensor of shape [3]. The FFT length for each dimension.
-//
-// Returns A complex64 tensor of the same rank as `input`. The inner-most 3
-// dimensions of `input` are replaced with their 3D Fourier transform. The
-// inner-most dimension contains `fft_length / 2 + 1` unique frequency
-// components.
-//
-// @compatibility(numpy)
-// Equivalent to np.fft.rfftn with 3 dimensions.
-// @end_compatibility
-func RFFT3D(scope *Scope, input tf.Output, fft_length tf.Output) (output tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "RFFT3D",
- Input: []tf.Input{
- input, fft_length,
- },
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// DepthwiseConv2dNativeAttr is an optional argument to DepthwiseConv2dNative.
-type DepthwiseConv2dNativeAttr func(optionalAttr)
-
-// DepthwiseConv2dNativeDataFormat sets the optional data_format attribute to value.
-//
-// value: Specify the data format of the input and output data. With the
-// default format "NHWC", the data is stored in the order of:
-// [batch, height, width, channels].
-// Alternatively, the format could be "NCHW", the data storage order of:
-// [batch, channels, height, width].
-// If not specified, defaults to "NHWC"
-func DepthwiseConv2dNativeDataFormat(value string) DepthwiseConv2dNativeAttr {
+func Conv2DBackpropInputDataFormat(value string) Conv2DBackpropInputAttr {
return func(m optionalAttr) {
m["data_format"] = value
}
}
-// DepthwiseConv2dNativeDilations sets the optional dilations attribute to value.
+// Conv2DBackpropInputDilations sets the optional dilations attribute to value.
//
// value: 1-D tensor of length 4. The dilation factor for each dimension of
// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
@@ -21555,40 +21292,29 @@ func DepthwiseConv2dNativeDataFormat(value string) DepthwiseConv2dNativeAttr {
// `data_format`, see above for details. Dilations in the batch and depth
// dimensions must be 1.
// If not specified, defaults to <i:1 i:1 i:1 i:1 >
-func DepthwiseConv2dNativeDilations(value []int64) DepthwiseConv2dNativeAttr {
+func Conv2DBackpropInputDilations(value []int64) Conv2DBackpropInputAttr {
return func(m optionalAttr) {
m["dilations"] = value
}
}
-// Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors.
-//
-// Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
-// and a filter / kernel tensor of shape
-// `[filter_height, filter_width, in_channels, channel_multiplier]`, containing
-// `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies
-// a different filter to each input channel (expanding from 1 channel to
-// `channel_multiplier` channels for each), then concatenates the results
-// together. Thus, the output has `in_channels * channel_multiplier` channels.
-//
-// ```
-// for k in 0..in_channels-1
-// for q in 0..channel_multiplier-1
-// output[b, i, j, k * channel_multiplier + q] =
-// sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
-// filter[di, dj, k, q]
-// ```
-//
-// Must have `strides[0] = strides[3] = 1`. For the most common case of the same
-// horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
+// Computes the gradients of convolution with respect to the input.
//
// Arguments:
-//
-//
-// strides: 1-D of length 4. The stride of the sliding window for each dimension
-// of `input`.
+// input_sizes: An integer vector representing the shape of `input`,
+// where `input` is a 4-D `[batch, height, width, channels]` tensor.
+// filter: 4-D with shape
+// `[filter_height, filter_width, in_channels, out_channels]`.
+// out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`.
+// Gradients w.r.t. the output of the convolution.
+// strides: The stride of the sliding window for each dimension of the input
+// of the convolution. Must be in the same order as the dimension specified with
+// format.
// padding: The type of padding algorithm to use.
-func DepthwiseConv2dNative(scope *Scope, input tf.Output, filter tf.Output, strides []int64, padding string, optional ...DepthwiseConv2dNativeAttr) (output tf.Output) {
+//
+// Returns 4-D with shape `[batch, in_height, in_width, in_channels]`. Gradient
+// w.r.t. the input of the convolution.
+func Conv2DBackpropInput(scope *Scope, input_sizes tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv2DBackpropInputAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
@@ -21597,38 +21323,9 @@ func DepthwiseConv2dNative(scope *Scope, input tf.Output, filter tf.Output, stri
a(attrs)
}
opspec := tf.OpSpec{
- Type: "DepthwiseConv2dNative",
- Input: []tf.Input{
- input, filter,
- },
- Attrs: attrs,
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// Computes the gradients of 3-D convolution with respect to the input.
-//
-// DEPRECATED at GraphDef version 10: Use Conv3DBackpropInputV2
-//
-// Arguments:
-// input: Shape `[batch, depth, rows, cols, in_channels]`.
-// filter: Shape `[depth, rows, cols, in_channels, out_channels]`.
-// `in_channels` must match between `input` and `filter`.
-// out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
-// out_channels]`.
-// strides: 1-D tensor of length 5. The stride of the sliding window for each
-// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
-// padding: The type of padding algorithm to use.
-func Conv3DBackpropInput(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string) (output tf.Output) {
- if scope.Err() != nil {
- return
- }
- attrs := map[string]interface{}{"strides": strides, "padding": padding}
- opspec := tf.OpSpec{
- Type: "Conv3DBackpropInput",
+ Type: "Conv2DBackpropInput",
Input: []tf.Input{
- input, filter, out_backprop,
+ input_sizes, filter, out_backprop,
},
Attrs: attrs,
}
@@ -21636,205 +21333,126 @@ func Conv3DBackpropInput(scope *Scope, input tf.Output, filter tf.Output, out_ba
return op.Output(0)
}
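For orientation, here is a brief usage sketch (not part of the generated file) showing how the Conv2DBackpropInput wrapper composes with the rest of this package; the shapes and placeholder feeds below are illustrative assumptions:

```go
package main

import (
	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// Gradient w.r.t. an assumed [1, 28, 28, 3] input of a stride-1, SAME-padded conv.
	inputSizes := op.Const(s, []int32{1, 28, 28, 3})
	filter := op.Placeholder(s, tf.Float)      // e.g. [3, 3, 3, 16], fed at run time
	outBackprop := op.Placeholder(s, tf.Float) // e.g. [1, 28, 28, 16], fed at run time
	grad := op.Conv2DBackpropInput(s, inputSizes, filter, outBackprop,
		[]int64{1, 1, 1, 1}, "SAME",
		op.Conv2DBackpropInputDilations([]int64{1, 1, 1, 1}))
	_ = grad // wire into a tf.Session as usual
}
```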
-// ReverseSequenceAttr is an optional argument to ReverseSequence.
-type ReverseSequenceAttr func(optionalAttr)
-
-// ReverseSequenceBatchDim sets the optional batch_dim attribute to value.
-//
-// value: The dimension along which reversal is performed.
-// If not specified, defaults to 0
-func ReverseSequenceBatchDim(value int64) ReverseSequenceAttr {
- return func(m optionalAttr) {
- m["batch_dim"] = value
- }
-}
-
-// Reverses variable length slices.
+// Interleave the values from the `data` tensors into a single tensor.
//
-// This op first slices `input` along the dimension `batch_dim`, and for each
-// slice `i`, reverses the first `seq_lengths[i]` elements along
-// the dimension `seq_dim`.
+// Builds a merged tensor such that
//
-// The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`,
-// and `seq_lengths` must be a vector of length `input.dims[batch_dim]`.
+// ```python
+// merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
+// ```
//
-// The output slice `i` along dimension `batch_dim` is then given by input
-// slice `i`, with the first `seq_lengths[i]` slices along dimension
-// `seq_dim` reversed.
+// For example, if each `indices[m]` is scalar or vector, we have
//
-// For example:
+// ```python
+// # Scalar indices:
+// merged[indices[m], ...] = data[m][...]
//
+// # Vector indices:
+// merged[indices[m][i], ...] = data[m][i, ...]
// ```
-// # Given this:
-// batch_dim = 0
-// seq_dim = 1
-// input.dims = (4, 8, ...)
-// seq_lengths = [7, 2, 3, 5]
//
-// # then slices of input are reversed on seq_dim, but only up to seq_lengths:
-// output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]
-// output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...]
-// output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...]
-// output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]
-//
-// # while entries past seq_lens are copied through:
-// output[0, 7:, :, ...] = input[0, 7:, :, ...]
-// output[1, 2:, :, ...] = input[1, 2:, :, ...]
-// output[2, 3:, :, ...] = input[2, 3:, :, ...]
-// output[3, 2:, :, ...] = input[3, 2:, :, ...]
-// ```
+// Each `data[i].shape` must start with the corresponding `indices[i].shape`,
+// and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we
+// must have `data[i].shape = indices[i].shape + constant`. In terms of this
+// `constant`, the output shape is
//
-// In contrast, if:
+// merged.shape = [max(indices)] + constant
//
-// ```
-// # Given this:
-// batch_dim = 2
-// seq_dim = 0
-// input.dims = (8, ?, 4, ...)
-// seq_lengths = [7, 2, 3, 5]
+// Values are merged in order, so if an index appears in both `indices[m][i]` and
+// `indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the
+// merged result. If you do not need this guarantee, ParallelDynamicStitch might
+// perform better on some devices.
//
-// # then slices of input are reversed on seq_dim, but only up to seq_lengths:
-// output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]
-// output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...]
-// output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...]
-// output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]
+// For example:
//
-// # while entries past seq_lens are copied through:
-// output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]
-// output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]
-// output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]
-// output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...]
+// ```python
+// indices[0] = 6
+// indices[1] = [4, 1]
+// indices[2] = [[5, 2], [0, 3]]
+// data[0] = [61, 62]
+// data[1] = [[41, 42], [11, 12]]
+// data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
+// merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
+// [51, 52], [61, 62]]
// ```
//
-// Arguments:
-// input: The input to reverse.
-// seq_lengths: 1-D with length `input.dims(batch_dim)` and
-// `max(seq_lengths) <= input.dims(seq_dim)`
-// seq_dim: The dimension which is partially reversed.
+// This method can be used to merge partitions created by `dynamic_partition`
+// as illustrated in the following example:
//
-// Returns The partially reversed input. It has the same shape as `input`.
-func ReverseSequence(scope *Scope, input tf.Output, seq_lengths tf.Output, seq_dim int64, optional ...ReverseSequenceAttr) (output tf.Output) {
- if scope.Err() != nil {
- return
- }
- attrs := map[string]interface{}{"seq_dim": seq_dim}
- for _, a := range optional {
- a(attrs)
- }
- opspec := tf.OpSpec{
- Type: "ReverseSequence",
- Input: []tf.Input{
- input, seq_lengths,
- },
- Attrs: attrs,
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// Computes the gradient for the rsqrt of `x` wrt its input.
+// ```python
+// # Apply a function (increment x_i) to elements for which a certain condition
+// # applies (x_i != -1 in this example).
+// x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
+// condition_mask=tf.not_equal(x,tf.constant(-1.))
+// partitioned_data = tf.dynamic_partition(
+// x, tf.cast(condition_mask, tf.int32) , 2)
+// partitioned_data[1] = partitioned_data[1] + 1.0
+// condition_indices = tf.dynamic_partition(
+// tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2)
+// x = tf.dynamic_stitch(condition_indices, partitioned_data)
+// # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
+// # unchanged.
+// ```
//
-// Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and `dy`
-// is the corresponding input gradient.
-func RsqrtGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
+// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+// <img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
+// </div>
+func DynamicStitch(scope *Scope, indices []tf.Output, data []tf.Output) (merged tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "RsqrtGrad",
+ Type: "DynamicStitch",
Input: []tf.Input{
- y, dy,
+ tf.OutputList(indices), tf.OutputList(data),
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
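A minimal sketch of the scalar- and vector-index case described above, assuming the same imports and scope setup as the earlier Conv2DBackpropInput example:

```go
s := op.NewScope()
// indices[0] is scalar, indices[1] is a vector; data shapes follow
// data[i].shape = indices[i].shape + [1].
indices := []tf.Output{op.Const(s, int32(0)), op.Const(s, []int32{2, 1})}
data := []tf.Output{op.Const(s, []float32{10}), op.Const(s, [][]float32{{30}, {20}})}
merged := op.DynamicStitch(s, indices, data)
_ = merged // [[10], [20], [30]] once the graph is run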
-// Computes the gradients of 3-D convolution with respect to the filter.
-//
-// DEPRECATED at GraphDef version 10: Use Conv3DBackpropFilterV2
+// Returns the truth value of (x == y) element-wise.
//
-// Arguments:
-// input: Shape `[batch, depth, rows, cols, in_channels]`.
-// filter: Shape `[depth, rows, cols, in_channels, out_channels]`.
-// `in_channels` must match between `input` and `filter`.
-// out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
-// out_channels]`.
-// strides: 1-D tensor of length 5. The stride of the sliding window for each
-// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
-// padding: The type of padding algorithm to use.
-func Conv3DBackpropFilter(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string) (output tf.Output) {
+// *NOTE*: `Equal` supports broadcasting. More about broadcasting
+// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+func Equal(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"strides": strides, "padding": padding}
opspec := tf.OpSpec{
- Type: "Conv3DBackpropFilter",
+ Type: "Equal",
Input: []tf.Input{
- input, filter, out_backprop,
+ x, y,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Conv3DBackpropInputV2Attr is an optional argument to Conv3DBackpropInputV2.
-type Conv3DBackpropInputV2Attr func(optionalAttr)
-
-// Conv3DBackpropInputV2DataFormat sets the optional data_format attribute to value.
-//
-// value: The data format of the input and output data. With the
-// default format "NDHWC", the data is stored in the order of:
-// [batch, in_depth, in_height, in_width, in_channels].
-// Alternatively, the format could be "NCDHW", the data storage order is:
-// [batch, in_channels, in_depth, in_height, in_width].
-// If not specified, defaults to "NDHWC"
-func Conv3DBackpropInputV2DataFormat(value string) Conv3DBackpropInputV2Attr {
- return func(m optionalAttr) {
- m["data_format"] = value
- }
-}
+// TensorArrayGatherV2Attr is an optional argument to TensorArrayGatherV2.
+type TensorArrayGatherV2Attr func(optionalAttr)
-// Conv3DBackpropInputV2Dilations sets the optional dilations attribute to value.
-//
-// value: 1-D tensor of length 5. The dilation factor for each dimension of
-// `input`. If set to k > 1, there will be k-1 skipped cells between each
-// filter element on that dimension. The dimension order is determined by the
-// value of `data_format`, see above for details. Dilations in the batch and
-// depth dimensions must be 1.
-// If not specified, defaults to <i:1 i:1 i:1 i:1 i:1 >
-func Conv3DBackpropInputV2Dilations(value []int64) Conv3DBackpropInputV2Attr {
+// TensorArrayGatherV2ElementShape sets the optional element_shape attribute to value.
+// If not specified, defaults to <unknown_rank:true >
+func TensorArrayGatherV2ElementShape(value tf.Shape) TensorArrayGatherV2Attr {
return func(m optionalAttr) {
- m["dilations"] = value
+ m["element_shape"] = value
}
}
-// Computes the gradients of 3-D convolution with respect to the input.
-//
-// Arguments:
-// input_sizes: An integer vector representing the tensor shape of `input`,
-// where `input` is a 5-D
-// `[batch, depth, rows, cols, in_channels]` tensor.
-// filter: Shape `[depth, rows, cols, in_channels, out_channels]`.
-// `in_channels` must match between `input` and `filter`.
-// out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
-// out_channels]`.
-// strides: 1-D tensor of length 5. The stride of the sliding window for each
-// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
-// padding: The type of padding algorithm to use.
-func Conv3DBackpropInputV2(scope *Scope, input_sizes tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv3DBackpropInputV2Attr) (output tf.Output) {
+// Deprecated. Use TensorArrayGatherV3
+func TensorArrayGatherV2(scope *Scope, handle tf.Output, indices tf.Output, flow_in tf.Output, dtype tf.DataType, optional ...TensorArrayGatherV2Attr) (value tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"strides": strides, "padding": padding}
+ attrs := map[string]interface{}{"dtype": dtype}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "Conv3DBackpropInputV2",
+ Type: "TensorArrayGatherV2",
Input: []tf.Input{
- input_sizes, filter, out_backprop,
+ handle, indices, flow_in,
},
Attrs: attrs,
}
@@ -21842,276 +21460,287 @@ func Conv3DBackpropInputV2(scope *Scope, input_sizes tf.Output, filter tf.Output
return op.Output(0)
}
-// Returns a tensor of ones with the same shape and type as x.
+// Interleave the values from the `data` tensors into a single tensor.
//
-// Arguments:
-// x: a tensor of type T.
+// Builds a merged tensor such that
//
-// Returns a tensor of the same shape and type as x but filled with ones.
-func OnesLike(scope *Scope, x tf.Output) (y tf.Output) {
+// ```python
+// merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
+// ```
+//
+// For example, if each `indices[m]` is scalar or vector, we have
+//
+// ```python
+// # Scalar indices:
+// merged[indices[m], ...] = data[m][...]
+//
+// # Vector indices:
+// merged[indices[m][i], ...] = data[m][i, ...]
+// ```
+//
+// Each `data[i].shape` must start with the corresponding `indices[i].shape`,
+// and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we
+// must have `data[i].shape = indices[i].shape + constant`. In terms of this
+// `constant`, the output shape is
+//
+// merged.shape = [max(indices)] + constant
+//
+// Values may be merged in parallel, so if an index appears in both `indices[m][i]`
+// and `indices[n][j]`, the result may be invalid. This differs from the normal
+// DynamicStitch operator, which defines the behavior in that case.
+//
+// For example:
+//
+// ```python
+// indices[0] = 6
+// indices[1] = [4, 1]
+// indices[2] = [[5, 2], [0, 3]]
+// data[0] = [61, 62]
+// data[1] = [[41, 42], [11, 12]]
+// data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
+// merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
+// [51, 52], [61, 62]]
+// ```
+//
+// This method can be used to merge partitions created by `dynamic_partition`
+// as illustrated in the following example:
+//
+// ```python
+// # Apply a function (increment x_i) to elements for which a certain condition
+// # applies (x_i != -1 in this example).
+// x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
+// condition_mask=tf.not_equal(x,tf.constant(-1.))
+// partitioned_data = tf.dynamic_partition(
+// x, tf.cast(condition_mask, tf.int32) , 2)
+// partitioned_data[1] = partitioned_data[1] + 1.0
+// condition_indices = tf.dynamic_partition(
+// tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2)
+// x = tf.dynamic_stitch(condition_indices, partitioned_data)
+// # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
+// # unchanged.
+// ```
+//
+// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+// <img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
+// </div>
+func ParallelDynamicStitch(scope *Scope, indices []tf.Output, data []tf.Output) (merged tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "OnesLike",
+ Type: "ParallelDynamicStitch",
Input: []tf.Input{
- x,
+ tf.OutputList(indices), tf.OutputList(data),
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
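Since correctness here hinges on disjoint index sets, a small sketch with non-overlapping indices (same assumed imports as the earlier examples):

```go
s := op.NewScope()
// The two index sets are disjoint, so the parallel merge is well defined.
indices := []tf.Output{op.Const(s, []int32{0, 2}), op.Const(s, []int32{1, 3})}
data := []tf.Output{op.Const(s, []float32{0, 20}), op.Const(s, []float32{10, 30})}
merged := op.ParallelDynamicStitch(s, indices, data)
_ = merged // [0, 10, 20, 30] once run
```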
-// Returns element-wise remainder of division. This emulates C semantics in that
-//
-// the result here is consistent with a truncating divide. E.g.
-// `tf.truncatediv(x, y) * y + truncate_mod(x, y) = x`.
+// Computes the gradient for the inverse of `x` wrt its input.
//
-// *NOTE*: `Mod` supports broadcasting. More about broadcasting
-// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-func Mod(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
+// Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`
+// is the corresponding input gradient.
+func InvGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Mod",
+ Type: "InvGrad",
Input: []tf.Input{
- x, y,
+ y, dy,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// QuantizeAndDequantizeV3Attr is an optional argument to QuantizeAndDequantizeV3.
-type QuantizeAndDequantizeV3Attr func(optionalAttr)
+// StridedSliceAttr is an optional argument to StridedSlice.
+type StridedSliceAttr func(optionalAttr)
-// QuantizeAndDequantizeV3SignedInput sets the optional signed_input attribute to value.
-// If not specified, defaults to true
-func QuantizeAndDequantizeV3SignedInput(value bool) QuantizeAndDequantizeV3Attr {
+// StridedSliceBeginMask sets the optional begin_mask attribute to value.
+//
+// value: a bitmask where a bit i being 1 means to ignore the begin
+// value and instead use the largest interval possible. At runtime
+// `begin[i]` will be replaced with `[0, n-1)` if `stride[i] > 0` or
+// `[-1, n-1]` if `stride[i] < 0`.
+// If not specified, defaults to 0
+func StridedSliceBeginMask(value int64) StridedSliceAttr {
return func(m optionalAttr) {
- m["signed_input"] = value
+ m["begin_mask"] = value
}
}
-// QuantizeAndDequantizeV3RangeGiven sets the optional range_given attribute to value.
-// If not specified, defaults to true
-func QuantizeAndDequantizeV3RangeGiven(value bool) QuantizeAndDequantizeV3Attr {
+// StridedSliceEndMask sets the optional end_mask attribute to value.
+//
+// value: analogous to `begin_mask`
+// If not specified, defaults to 0
+func StridedSliceEndMask(value int64) StridedSliceAttr {
return func(m optionalAttr) {
- m["range_given"] = value
+ m["end_mask"] = value
}
}
-// Quantizes then dequantizes a tensor.
+// StridedSliceEllipsisMask sets the optional ellipsis_mask attribute to value.
//
-// This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a
-// tensor, so its value can change during training.
-func QuantizeAndDequantizeV3(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, num_bits tf.Output, optional ...QuantizeAndDequantizeV3Attr) (output tf.Output) {
- if scope.Err() != nil {
- return
- }
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
- opspec := tf.OpSpec{
- Type: "QuantizeAndDequantizeV3",
- Input: []tf.Input{
- input, input_min, input_max, num_bits,
- },
- Attrs: attrs,
+// value: a bitmask where bit `i` being 1 means the `i`th
+// position is actually an ellipsis. At most one bit can be 1.
+// If `ellipsis_mask == 0`, then an implicit ellipsis mask of `1 << (m+1)`
+// is provided. This means that `foo[3:5] == foo[3:5, ...]`. An ellipsis
+// implicitly creates as many range specifications as necessary to fully
+// specify the sliced range for every dimension. For example for a 4-dimensional
+// tensor `foo` the slice `foo[2, ..., 5:8]` implies `foo[2, :, :, 5:8]`.
+// If not specified, defaults to 0
+func StridedSliceEllipsisMask(value int64) StridedSliceAttr {
+ return func(m optionalAttr) {
+ m["ellipsis_mask"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// AvgPool3DAttr is an optional argument to AvgPool3D.
-type AvgPool3DAttr func(optionalAttr)
-
-// AvgPool3DDataFormat sets the optional data_format attribute to value.
+// StridedSliceNewAxisMask sets the optional new_axis_mask attribute to value.
//
-// value: The data format of the input and output data. With the
-// default format "NDHWC", the data is stored in the order of:
-// [batch, in_depth, in_height, in_width, in_channels].
-// Alternatively, the format could be "NCDHW", the data storage order is:
-// [batch, in_channels, in_depth, in_height, in_width].
-// If not specified, defaults to "NDHWC"
-func AvgPool3DDataFormat(value string) AvgPool3DAttr {
+// value: a bitmask where bit `i` being 1 means the `i`th
+// specification creates a new size-1 dimension. For example
+// `foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor.
+// If not specified, defaults to 0
+func StridedSliceNewAxisMask(value int64) StridedSliceAttr {
return func(m optionalAttr) {
- m["data_format"] = value
+ m["new_axis_mask"] = value
}
}
-// Performs 3D average pooling on the input.
-//
-// Arguments:
-// input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.
-// ksize: 1-D tensor of length 5. The size of the window for each dimension of
-// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
-// strides: 1-D tensor of length 5. The stride of the sliding window for each
-// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
-// padding: The type of padding algorithm to use.
+// StridedSliceShrinkAxisMask sets the optional shrink_axis_mask attribute to value.
//
-// Returns The average pooled output tensor.
-func AvgPool3D(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPool3DAttr) (output tf.Output) {
- if scope.Err() != nil {
- return
- }
- attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
- for _, a := range optional {
- a(attrs)
- }
- opspec := tf.OpSpec{
- Type: "AvgPool3D",
- Input: []tf.Input{
- input,
- },
- Attrs: attrs,
+// value: a bitmask where bit `i` implies that the `i`th
+// specification should shrink the dimensionality. begin and end
+// must imply a slice of size 1 in the dimension. For example, in
+// Python one might do `foo[:, 3, :]`, which would result in
+// `shrink_axis_mask` being 2.
+// If not specified, defaults to 0
+func StridedSliceShrinkAxisMask(value int64) StridedSliceAttr {
+ return func(m optionalAttr) {
+ m["shrink_axis_mask"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// Produces the max pool of the input tensor for quantized types.
+// Return a strided slice from `input`.
//
-// Arguments:
-// input: The 4D (batch x rows x cols x depth) Tensor to MaxReduce over.
-// min_input: The float value that the lowest quantized input value represents.
-// max_input: The float value that the highest quantized input value represents.
-// ksize: The size of the window for each dimension of the input tensor.
-// The length must be 4 to match the number of dimensions of the input.
-// strides: The stride of the sliding window for each dimension of the input
-// tensor. The length must be 4 to match the number of dimensions of the input.
-// padding: The type of padding algorithm to use.
+// Note: most Python users will want to use `Tensor.__getitem__`
+// or `Variable.__getitem__` rather than this op directly.
//
-// Returns The float value that the lowest quantized output value represents.The float value that the highest quantized output value represents.
-func QuantizedMaxPool(scope *Scope, input tf.Output, min_input tf.Output, max_input tf.Output, ksize []int64, strides []int64, padding string) (output tf.Output, min_output tf.Output, max_output tf.Output) {
- if scope.Err() != nil {
- return
- }
- attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
- opspec := tf.OpSpec{
- Type: "QuantizedMaxPool",
- Input: []tf.Input{
- input, min_input, max_input,
- },
- Attrs: attrs,
- }
- op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
-}
-
-// AvgPool3DGradAttr is an optional argument to AvgPool3DGrad.
-type AvgPool3DGradAttr func(optionalAttr)
-
-// AvgPool3DGradDataFormat sets the optional data_format attribute to value.
+// The goal of this op is to produce a new tensor with a subset of
+// the elements from the `n` dimensional `input` tensor. The subset is chosen using
+// a sequence of `m` sparse range specifications encoded into the arguments
+// of this function. Note, in some cases
+// `m` could be equal to `n`, but this need not be the case. Each
+// range specification entry can be one of the following:
//
-// value: The data format of the input and output data. With the
-// default format "NDHWC", the data is stored in the order of:
-// [batch, in_depth, in_height, in_width, in_channels].
-// Alternatively, the format could be "NCDHW", the data storage order is:
-// [batch, in_channels, in_depth, in_height, in_width].
-// If not specified, defaults to "NDHWC"
-func AvgPool3DGradDataFormat(value string) AvgPool3DGradAttr {
- return func(m optionalAttr) {
- m["data_format"] = value
- }
-}
-
-// Computes gradients of average pooling function.
+// - An ellipsis (...). Ellipses are used to imply zero or more
+// dimensions of full-dimension selection and are produced using
+// `ellipsis_mask`. For example, `foo[...]` is the identity slice.
//
-// Arguments:
-// orig_input_shape: The original input dimensions.
-// grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.
-// ksize: 1-D tensor of length 5. The size of the window for each dimension of
-// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
-// strides: 1-D tensor of length 5. The stride of the sliding window for each
-// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
-// padding: The type of padding algorithm to use.
+// - A new axis. This is used to insert a new shape=1 dimension and is
+// produced using `new_axis_mask`. For example, `foo[:, ...]` where
+// `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor.
//
-// Returns The backprop for input.
-func AvgPool3DGrad(scope *Scope, orig_input_shape tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPool3DGradAttr) (output tf.Output) {
- if scope.Err() != nil {
- return
- }
- attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
- for _, a := range optional {
- a(attrs)
- }
- opspec := tf.OpSpec{
- Type: "AvgPool3DGrad",
- Input: []tf.Input{
- orig_input_shape, grad,
- },
- Attrs: attrs,
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// Writes a `GraphDef` protocol buffer to a `SummaryWriter`.
//
-// Arguments:
-// writer: Handle of `SummaryWriter`.
-// step: The step to write the summary for.
-// tensor: A scalar string of the serialized tf.GraphDef proto.
+// - A range `begin:end:stride`. This is used to specify how much to choose from
+// a given dimension. `stride` can be any integer but 0. `begin` is an integer
+// which represents the index of the first value to select while `end` represents
+// the index of the last value to select. The number of values selected in each
+// dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`.
+// `begin` and `end` can be negative where `-1` is the last element, `-2` is
+// the second to last. `begin_mask` controls whether to replace the explicitly
+// given `begin` with an implicit effective value of `0` if `stride > 0` and
+// `-1` if `stride < 0`. `end_mask` is analogous but produces the number
+// required to create the largest open interval. For example, given a shape
+// `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do
+// not assume this is equivalent to `foo[0:-1]` which has an effective `begin`
+// and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the
+// first dimension of a tensor while dropping the last element (in the original
+// order). For example `foo = [1,2,3,4]; foo[-2::-1]` is `[3,2,1]`.
//
-// Returns the created operation.
-func WriteGraphSummary(scope *Scope, writer tf.Output, step tf.Output, tensor tf.Output) (o *tf.Operation) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "WriteGraphSummary",
- Input: []tf.Input{
- writer, step, tensor,
- },
- }
- return scope.AddOperation(opspec)
-}
-
-// MaxPool3DGradGradAttr is an optional argument to MaxPool3DGradGrad.
-type MaxPool3DGradGradAttr func(optionalAttr)
-
-// MaxPool3DGradGradDataFormat sets the optional data_format attribute to value.
+// - A single index. This is used to keep only elements that have a given
+// index. For example, `foo[2, :]` on a shape `(5,6)` tensor produces a
+// shape `(6,)` tensor. This is encoded in `begin` and `end` and
+// `shrink_axis_mask`.
//
-// value: The data format of the input and output data. With the
-// default format "NDHWC", the data is stored in the order of:
-// [batch, in_depth, in_height, in_width, in_channels].
-// Alternatively, the format could be "NCDHW", the data storage order is:
-// [batch, in_channels, in_depth, in_height, in_width].
-// If not specified, defaults to "NDHWC"
-func MaxPool3DGradGradDataFormat(value string) MaxPool3DGradGradAttr {
- return func(m optionalAttr) {
- m["data_format"] = value
- }
-}
-
-// Computes second-order gradients of the maxpooling function.
+// Each conceptual range specification is encoded in the op's argument. This
+// encoding is best understood by considering a non-trivial example. In
+// particular,
+// `foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as
+//
+// ```
+// begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0)
+// end = [2, 4, x, x, -3, x]
+// strides = [1, 1, x, x, -1, 1]
+// begin_mask = 1<<4 | 1 << 5 = 48
+// end_mask = 1<<5 = 32
+// ellipsis_mask = 1<<3 = 8
+// new_axis_mask = 1<<2 = 4
+// shrink_axis_mask = 1<<0 = 1
+// ```
+//
+// In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of
+// the slice becomes (2, 1, 5, 5, 2, 5).
+// Let us walk step by step through each argument specification.
+//
+// 1. The first argument in the example slice is turned into `begin = 1` and
+// `end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we
+// also set the appropriate bit in `shrink_axis_mask`.
+//
+// 2. `2:4` contributes 2, 4, 1 to begin, end, and stride. All masks have
+// zero bits contributed.
+//
+// 3. None is a synonym for `tf.newaxis`. This means insert a dimension of size 1
+// in the final shape. Dummy values are contributed to begin,
+// end and stride, while the new_axis_mask bit is set.
+//
+// 4. `...` grabs the full ranges from as many dimensions as needed to
+// fully specify a slice for every dimension of the input shape.
+//
+// 5. `:-3:-1` shows the use of negative indices. A negative index `i` associated
+// with a dimension that has shape `s` is converted to a positive index
+// `s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion
+// is done internally so begin, end and strides receive x, -3, and -1.
+// The appropriate begin_mask bit is set to indicate the start range is the
+// full range (ignoring the x).
+//
+// 6. `:` indicates that the entire contents of the corresponding dimension
+// is selected. This is equivalent to `::` or `0::1`. begin, end, and strides
+// receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and
+// `end_mask` are also set.
+//
+// *Requirements*:
+// `0 != strides[i] for i in [0, m)`
+// `ellipsis_mask must be a power of two (only one ellipsis)`
//
// Arguments:
-// orig_input: The original input tensor.
-// orig_output: The original output tensor.
-// grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.
-// ksize: 1-D tensor of length 5. The size of the window for each dimension of
-// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
-// strides: 1-D tensor of length 5. The stride of the sliding window for each
-// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
-// padding: The type of padding algorithm to use.
//
-// Returns Gradients of gradients w.r.t. the input to `max_pool`.
-func MaxPool3DGradGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPool3DGradGradAttr) (output tf.Output) {
+// begin: `begin[k]` specifies the offset into the `k`th range specification.
+// The exact dimension this corresponds to will be determined by context.
+// Out-of-bounds values will be silently clamped. If the `k`th bit of
+// `begin_mask` is set, then `begin[k]` is ignored and the full range of the
+// appropriate dimension is used instead. Negative values cause indexing
+// to start from the highest element, e.g. if `foo==[1,2,3]` then `foo[-1]==3`.
+// end: `end[i]` is like `begin` with the exception that `end_mask` is
+// used to determine full ranges.
+// strides: `strides[i]` specifies the increment in the `i`th specification
+// after extracting a given element. Negative indices will reverse
+// the original order. Out-of-range values are
+// clamped to `[0,dim[i])` if `strides[i] > 0` or `[-1,dim[i]-1]` if `strides[i] < 0`.
+func StridedSlice(scope *Scope, input tf.Output, begin tf.Output, end tf.Output, strides tf.Output, optional ...StridedSliceAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
+ attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "MaxPool3DGradGrad",
+ Type: "StridedSlice",
Input: []tf.Input{
- orig_input, orig_output, grad,
+ input, begin, end, strides,
},
Attrs: attrs,
}
@@ -22119,158 +21748,176 @@ func MaxPool3DGradGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output
return op.Output(0)
}
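To make the mask encoding concrete, a hedged sketch (same assumed imports) of the Python slice `x[:, 1:3]` expressed through this wrapper:

```go
s := op.NewScope()
x := op.Const(s, [][]int32{{1, 2, 3}, {4, 5, 6}})
begin := op.Const(s, []int32{0, 1})
end := op.Const(s, []int32{2, 3})
strides := op.Const(s, []int32{1, 1})
// begin_mask bit 0 is set, so begin[0] is ignored and dimension 0 starts
// at its full range; this mirrors the Python slice x[:, 1:3].
y := op.StridedSlice(s, x, begin, end, strides, op.StridedSliceBeginMask(1))
_ = y // [[2, 3], [5, 6]] once run
```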
-// FakeQuantWithMinMaxArgsGradientAttr is an optional argument to FakeQuantWithMinMaxArgsGradient.
-type FakeQuantWithMinMaxArgsGradientAttr func(optionalAttr)
+// PriorityQueueV2Attr is an optional argument to PriorityQueueV2.
+type PriorityQueueV2Attr func(optionalAttr)
-// FakeQuantWithMinMaxArgsGradientMin sets the optional min attribute to value.
-// If not specified, defaults to -6
-func FakeQuantWithMinMaxArgsGradientMin(value float32) FakeQuantWithMinMaxArgsGradientAttr {
+// PriorityQueueV2ComponentTypes sets the optional component_types attribute to value.
+//
+// value: The type of each component in a value.
+// If not specified, defaults to <>
+//
+// REQUIRES: len(value) >= 0
+func PriorityQueueV2ComponentTypes(value []tf.DataType) PriorityQueueV2Attr {
return func(m optionalAttr) {
- m["min"] = value
+ m["component_types"] = value
}
}
-// FakeQuantWithMinMaxArgsGradientMax sets the optional max attribute to value.
-// If not specified, defaults to 6
-func FakeQuantWithMinMaxArgsGradientMax(value float32) FakeQuantWithMinMaxArgsGradientAttr {
+// PriorityQueueV2Capacity sets the optional capacity attribute to value.
+//
+// value: The upper bound on the number of elements in this queue.
+// Negative numbers mean no limit.
+// If not specified, defaults to -1
+func PriorityQueueV2Capacity(value int64) PriorityQueueV2Attr {
return func(m optionalAttr) {
- m["max"] = value
+ m["capacity"] = value
}
}
-// FakeQuantWithMinMaxArgsGradientNumBits sets the optional num_bits attribute to value.
-// If not specified, defaults to 8
-func FakeQuantWithMinMaxArgsGradientNumBits(value int64) FakeQuantWithMinMaxArgsGradientAttr {
+// PriorityQueueV2Container sets the optional container attribute to value.
+//
+// value: If non-empty, this queue is placed in the given container.
+// Otherwise, a default container is used.
+// If not specified, defaults to ""
+func PriorityQueueV2Container(value string) PriorityQueueV2Attr {
return func(m optionalAttr) {
- m["num_bits"] = value
+ m["container"] = value
}
}
-// FakeQuantWithMinMaxArgsGradientNarrowRange sets the optional narrow_range attribute to value.
-// If not specified, defaults to false
-func FakeQuantWithMinMaxArgsGradientNarrowRange(value bool) FakeQuantWithMinMaxArgsGradientAttr {
+// PriorityQueueV2SharedName sets the optional shared_name attribute to value.
+//
+// value: If non-empty, this queue will be shared under the given name
+// across multiple sessions.
+// If not specified, defaults to ""
+func PriorityQueueV2SharedName(value string) PriorityQueueV2Attr {
return func(m optionalAttr) {
- m["narrow_range"] = value
+ m["shared_name"] = value
}
}
-// Compute gradients for a FakeQuantWithMinMaxArgs operation.
+// A queue that produces elements sorted by the first component value.
+//
+// Note that the PriorityQueue requires the first component of any element
+// to be a scalar int64, in addition to the other elements declared by
+// component_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue
+// and DequeueMany) on a PriorityQueue will all require (resp. output) one extra
+// entry in their input (resp. output) lists.
//
// Arguments:
-// gradients: Backpropagated gradients above the FakeQuantWithMinMaxArgs operation.
-// inputs: Values passed as inputs to the FakeQuantWithMinMaxArgs operation.
+// shapes: The shape of each component in a value. The length of this attr must
+// be either 0 or the same as the length of component_types. If the length of
+// this attr is 0, the shapes of queue elements are not constrained, and
+// only one element may be dequeued at a time.
//
-// Returns Backpropagated gradients below the FakeQuantWithMinMaxArgs operation:
-// `gradients * (inputs >= min && inputs <= max)`.
-func FakeQuantWithMinMaxArgsGradient(scope *Scope, gradients tf.Output, inputs tf.Output, optional ...FakeQuantWithMinMaxArgsGradientAttr) (backprops tf.Output) {
+// Returns The handle to the queue.
+func PriorityQueueV2(scope *Scope, shapes []tf.Shape, optional ...PriorityQueueV2Attr) (handle tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"shapes": shapes}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "FakeQuantWithMinMaxArgsGradient",
- Input: []tf.Input{
- gradients, inputs,
- },
+ Type: "PriorityQueueV2",
+
Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
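A construction sketch (same assumed imports); per the note above, enqueue and dequeue ops on this handle carry the scalar int64 priority as an extra first component:

```go
s := op.NewScope()
// Empty shapes leave element shapes unconstrained (single-element dequeue only).
q := op.PriorityQueueV2(s, []tf.Shape{},
	op.PriorityQueueV2ComponentTypes([]tf.DataType{tf.String}),
	op.PriorityQueueV2Capacity(16))
_ = q
```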
-// CriticalSectionOpAttr is an optional argument to CriticalSectionOp.
-type CriticalSectionOpAttr func(optionalAttr)
+// UnstageAttr is an optional argument to Unstage.
+type UnstageAttr func(optionalAttr)
-// CriticalSectionOpContainer sets the optional container attribute to value.
+// UnstageCapacity sets the optional capacity attribute to value.
+// If not specified, defaults to 0
//
-// value: the container this critical section is placed in.
+// REQUIRES: value >= 0
+func UnstageCapacity(value int64) UnstageAttr {
+ return func(m optionalAttr) {
+ m["capacity"] = value
+ }
+}
+
+// UnstageMemoryLimit sets the optional memory_limit attribute to value.
+// If not specified, defaults to 0
+//
+// REQUIRES: value >= 0
+func UnstageMemoryLimit(value int64) UnstageAttr {
+ return func(m optionalAttr) {
+ m["memory_limit"] = value
+ }
+}
+
+// UnstageContainer sets the optional container attribute to value.
// If not specified, defaults to ""
-func CriticalSectionOpContainer(value string) CriticalSectionOpAttr {
+func UnstageContainer(value string) UnstageAttr {
return func(m optionalAttr) {
m["container"] = value
}
}
-// CriticalSectionOpSharedName sets the optional shared_name attribute to value.
-//
-// value: the name by which this critical section is referred to.
+// UnstageSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
-func CriticalSectionOpSharedName(value string) CriticalSectionOpAttr {
+func UnstageSharedName(value string) UnstageAttr {
return func(m optionalAttr) {
m["shared_name"] = value
}
}
-// Creates a handle to a CriticalSection resource.
-func CriticalSectionOp(scope *Scope, optional ...CriticalSectionOpAttr) (resource tf.Output) {
+// Op is similar to a lightweight Dequeue.
+//
+// The basic functionality is similar to dequeue, but with many fewer
+// capabilities and options. This Op is optimized for performance.
+func Unstage(scope *Scope, dtypes []tf.DataType, optional ...UnstageAttr) (values []tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"dtypes": dtypes}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "CriticalSectionOp",
+ Type: "Unstage",
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// Computes gradients of the maxpooling function.
-//
-// Arguments:
-// input: The original input.
-// grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the
-// output of `max_pool`.
-// argmax: The indices of the maximum values chosen for each output of `max_pool`.
-// ksize: The size of the window for each dimension of the input tensor.
-// strides: The stride of the sliding window for each dimension of the
-// input tensor.
-// padding: The type of padding algorithm to use.
-//
-// Returns Gradients w.r.t. the input of `max_pool`.
-func MaxPoolGradWithArgmax(scope *Scope, input tf.Output, grad tf.Output, argmax tf.Output, ksize []int64, strides []int64, padding string) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
- opspec := tf.OpSpec{
- Type: "MaxPoolGradWithArgmax",
- Input: []tf.Input{
- input, grad, argmax,
- },
- Attrs: attrs,
+ var idx int
+ var err error
+ if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
+ scope.UpdateErr("Unstage", err)
+ return
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return values
}
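A hedged sketch (same assumed imports); the shared name and dtype list are illustrative and must match the Stage op that fills the buffer:

```go
s := op.NewScope()
// Pops a (float32, int32) tuple staged elsewhere under the same shared_name.
values := op.Unstage(s, []tf.DataType{tf.Float, tf.Int32},
	op.UnstageSharedName("staging_buffer")) // hypothetical name
_ = values // values[0] is the float32 component, values[1] the int32 one
```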
-// StringToNumberAttr is an optional argument to StringToNumber.
-type StringToNumberAttr func(optionalAttr)
+// ArgMaxAttr is an optional argument to ArgMax.
+type ArgMaxAttr func(optionalAttr)
-// StringToNumberOutType sets the optional out_type attribute to value.
-//
-// value: The numeric type to interpret each string in `string_tensor` as.
-// If not specified, defaults to DT_FLOAT
-func StringToNumberOutType(value tf.DataType) StringToNumberAttr {
+// ArgMaxOutputType sets the optional output_type attribute to value.
+// If not specified, defaults to DT_INT64
+func ArgMaxOutputType(value tf.DataType) ArgMaxAttr {
return func(m optionalAttr) {
- m["out_type"] = value
+ m["output_type"] = value
}
}
-// Converts each string in the input Tensor to the specified numeric type.
+// Returns the index with the largest value across dimensions of a tensor.
//
-// (Note that int32 overflow results in an error while float overflow
-// results in a rounded value.)
+// Note that in case of ties the identity of the return value is not guaranteed.
//
-// Returns A Tensor of the same shape as the input `string_tensor`.
-func StringToNumber(scope *Scope, string_tensor tf.Output, optional ...StringToNumberAttr) (output tf.Output) {
+// Arguments:
+//
+// dimension: int32 or int64, must be in the range `[-rank(input), rank(input))`.
+// Describes which dimension of the input Tensor to reduce across. For vectors,
+// use dimension = 0.
+func ArgMax(scope *Scope, input tf.Output, dimension tf.Output, optional ...ArgMaxAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
@@ -22279,9 +21926,9 @@ func StringToNumber(scope *Scope, string_tensor tf.Output, optional ...StringToN
a(attrs)
}
opspec := tf.OpSpec{
- Type: "StringToNumber",
+ Type: "ArgMax",
Input: []tf.Input{
- string_tensor,
+ input, dimension,
},
Attrs: attrs,
}
@@ -22289,73 +21936,60 @@ func StringToNumber(scope *Scope, string_tensor tf.Output, optional ...StringToN
return op.Output(0)
}
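A small sketch (same assumed imports) reducing across the columns of a 2x2 matrix:

```go
s := op.NewScope()
logits := op.Const(s, [][]float32{{0.1, 0.9}, {0.8, 0.2}})
dim := op.Const(s, int32(1)) // reduce across dimension 1 (columns)
pred := op.ArgMax(s, logits, dim, op.ArgMaxOutputType(tf.Int32))
_ = pred // [1, 0] once run
```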
-// Returns the truth value of NOT x element-wise.
-func LogicalNot(scope *Scope, x tf.Output) (y tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "LogicalNot",
- Input: []tf.Input{
- x,
- },
+// ResourceStridedSliceAssignAttr is an optional argument to ResourceStridedSliceAssign.
+type ResourceStridedSliceAssignAttr func(optionalAttr)
+
+// ResourceStridedSliceAssignBeginMask sets the optional begin_mask attribute to value.
+// If not specified, defaults to 0
+func ResourceStridedSliceAssignBeginMask(value int64) ResourceStridedSliceAssignAttr {
+ return func(m optionalAttr) {
+ m["begin_mask"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// LRNGradAttr is an optional argument to LRNGrad.
-type LRNGradAttr func(optionalAttr)
-
-// LRNGradDepthRadius sets the optional depth_radius attribute to value.
-//
-// value: A depth radius.
-// If not specified, defaults to 5
-func LRNGradDepthRadius(value int64) LRNGradAttr {
+// ResourceStridedSliceAssignEndMask sets the optional end_mask attribute to value.
+// If not specified, defaults to 0
+func ResourceStridedSliceAssignEndMask(value int64) ResourceStridedSliceAssignAttr {
return func(m optionalAttr) {
- m["depth_radius"] = value
+ m["end_mask"] = value
}
}
-// LRNGradBias sets the optional bias attribute to value.
-//
-// value: An offset (usually > 0 to avoid dividing by 0).
-// If not specified, defaults to 1
-func LRNGradBias(value float32) LRNGradAttr {
+// ResourceStridedSliceAssignEllipsisMask sets the optional ellipsis_mask attribute to value.
+// If not specified, defaults to 0
+func ResourceStridedSliceAssignEllipsisMask(value int64) ResourceStridedSliceAssignAttr {
return func(m optionalAttr) {
- m["bias"] = value
+ m["ellipsis_mask"] = value
}
}
-// LRNGradAlpha sets the optional alpha attribute to value.
-//
-// value: A scale factor, usually positive.
-// If not specified, defaults to 1
-func LRNGradAlpha(value float32) LRNGradAttr {
+// ResourceStridedSliceAssignNewAxisMask sets the optional new_axis_mask attribute to value.
+// If not specified, defaults to 0
+func ResourceStridedSliceAssignNewAxisMask(value int64) ResourceStridedSliceAssignAttr {
return func(m optionalAttr) {
- m["alpha"] = value
+ m["new_axis_mask"] = value
}
}
-// LRNGradBeta sets the optional beta attribute to value.
-//
-// value: An exponent.
-// If not specified, defaults to 0.5
-func LRNGradBeta(value float32) LRNGradAttr {
+// ResourceStridedSliceAssignShrinkAxisMask sets the optional shrink_axis_mask attribute to value.
+// If not specified, defaults to 0
+func ResourceStridedSliceAssignShrinkAxisMask(value int64) ResourceStridedSliceAssignAttr {
return func(m optionalAttr) {
- m["beta"] = value
+ m["shrink_axis_mask"] = value
}
}
-// Gradients for Local Response Normalization.
+// Assign `value` to the sliced l-value reference of `ref`.
//
-// Arguments:
-// input_grads: 4-D with shape `[batch, height, width, channels]`.
-// input_image: 4-D with shape `[batch, height, width, channels]`.
-// output_image: 4-D with shape `[batch, height, width, channels]`.
+// The values of `value` are assigned to the positions in the variable
+// `ref` that are selected by the slice parameters. The slice parameters
+// `begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`.
//
-// Returns The gradients for LRN.
-func LRNGrad(scope *Scope, input_grads tf.Output, input_image tf.Output, output_image tf.Output, optional ...LRNGradAttr) (output tf.Output) {
+// NOTE this op currently does not support broadcasting and so `value`'s
+// shape must be exactly the shape produced by the slice of `ref`.
+//
+// Returns the created operation.
+func ResourceStridedSliceAssign(scope *Scope, ref tf.Output, begin tf.Output, end tf.Output, strides tf.Output, value tf.Output, optional ...ResourceStridedSliceAssignAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
@@ -22364,48 +21998,44 @@ func LRNGrad(scope *Scope, input_grads tf.Output, input_image tf.Output, output_
a(attrs)
}
opspec := tf.OpSpec{
- Type: "LRNGrad",
+ Type: "ResourceStridedSliceAssign",
Input: []tf.Input{
- input_grads, input_image, output_image,
+ ref, begin, end, strides, value,
},
Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
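A sketch (same assumed imports) against a resource variable; note the variable must be initialized before the assign runs, which is omitted here:

```go
s := op.NewScope()
v := op.VarHandleOp(s, tf.Float, tf.MakeShape(4)) // resource variable, shape [4]
begin := op.Const(s, []int32{1})
end := op.Const(s, []int32{3})
strides := op.Const(s, []int32{1})
// Assigns into v[1:3]; the value's shape must equal the slice's shape exactly.
assign := op.ResourceStridedSliceAssign(s, v, begin, end, strides,
	op.Const(s, []float32{7, 8}))
_ = assign
```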
-// EncodePngAttr is an optional argument to EncodePng.
-type EncodePngAttr func(optionalAttr)
+// QueueEnqueueV2Attr is an optional argument to QueueEnqueueV2.
+type QueueEnqueueV2Attr func(optionalAttr)
-// EncodePngCompression sets the optional compression attribute to value.
+// QueueEnqueueV2TimeoutMs sets the optional timeout_ms attribute to value.
//
-// value: Compression level.
+// value: If the queue is full, this operation will block for up to
+// timeout_ms milliseconds.
+// Note: This option is not supported yet.
// If not specified, defaults to -1
-func EncodePngCompression(value int64) EncodePngAttr {
+func QueueEnqueueV2TimeoutMs(value int64) QueueEnqueueV2Attr {
return func(m optionalAttr) {
- m["compression"] = value
+ m["timeout_ms"] = value
}
}
-// PNG-encode an image.
-//
-// `image` is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]`
-// where `channels` is:
+// Enqueues a tuple of one or more tensors in the given queue.
//
-// * 1: for grayscale.
-// * 2: for grayscale + alpha.
-// * 3: for RGB.
-// * 4: for RGBA.
+// The components input has k elements, which correspond to the components of
+// tuples stored in the given queue.
//
-// The ZLIB compression level, `compression`, can be -1 for the PNG-encoder
-// default or a value from 0 to 9. 9 is the highest compression level, generating
-// the smallest output, but is slower.
+// N.B. If the queue is full, this operation will block until the given
+// element has been enqueued (or 'timeout_ms' elapses, if specified).
//
// Arguments:
-// image: 3-D with shape `[height, width, channels]`.
+// handle: The handle to a queue.
+// components: One or more tensors from which the enqueued tensors should be taken.
//
-// Returns 0-D. PNG-encoded image.
-func EncodePng(scope *Scope, image tf.Output, optional ...EncodePngAttr) (contents tf.Output) {
+// Returns the created operation.
+func QueueEnqueueV2(scope *Scope, handle tf.Output, components []tf.Output, optional ...QueueEnqueueV2Attr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
@@ -22414,380 +22044,424 @@ func EncodePng(scope *Scope, image tf.Output, optional ...EncodePngAttr) (conten
a(attrs)
}
opspec := tf.OpSpec{
- Type: "EncodePng",
+ Type: "QueueEnqueueV2",
Input: []tf.Input{
- image,
+ handle, tf.OutputList(components),
},
Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
-// MaxPoolAttr is an optional argument to MaxPool.
-type MaxPoolAttr func(optionalAttr)
+// QueueDequeueManyV2Attr is an optional argument to QueueDequeueManyV2.
+type QueueDequeueManyV2Attr func(optionalAttr)
-// MaxPoolDataFormat sets the optional data_format attribute to value.
+// QueueDequeueManyV2TimeoutMs sets the optional timeout_ms attribute to value.
//
-// value: Specify the data format of the input and output data. With the
-// default format "NHWC", the data is stored in the order of:
-// [batch, in_height, in_width, in_channels].
-// Alternatively, the format could be "NCHW", the data storage order of:
-// [batch, in_channels, in_height, in_width].
-// If not specified, defaults to "NHWC"
-func MaxPoolDataFormat(value string) MaxPoolAttr {
+// value: If the queue has fewer than n elements, this operation
+// will block for up to timeout_ms milliseconds.
+// Note: This option is not supported yet.
+// If not specified, defaults to -1
+func QueueDequeueManyV2TimeoutMs(value int64) QueueDequeueManyV2Attr {
return func(m optionalAttr) {
- m["data_format"] = value
+ m["timeout_ms"] = value
}
}
-// Performs max pooling on the input.
+// Dequeues `n` tuples of one or more tensors from the given queue.
+//
+// If the queue is closed and there are fewer than `n` elements, then an
+// OutOfRange error is returned.
+//
+// This operation concatenates queue-element component tensors along the
+// 0th dimension to make a single component tensor. All of the components
+// in the dequeued tuple will have size `n` in the 0th dimension.
+//
+// This operation has `k` outputs, where `k` is the number of components in
+// the tuples stored in the given queue, and output `i` is the ith
+// component of the dequeued tuple.
+//
+// N.B. If the queue is empty, this operation will block until `n` elements
+// have been dequeued (or 'timeout_ms' elapses, if specified).
//
// Arguments:
-// input: 4-D input to pool over.
-// ksize: The size of the window for each dimension of the input tensor.
-// strides: The stride of the sliding window for each dimension of the
-// input tensor.
-// padding: The type of padding algorithm to use.
+// handle: The handle to a queue.
+// n: The number of tuples to dequeue.
+// component_types: The type of each component in a tuple.
//
-// Returns The max pooled output tensor.
-func MaxPool(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolAttr) (output tf.Output) {
+// Returns One or more tensors that were dequeued as a tuple.
+func QueueDequeueManyV2(scope *Scope, handle tf.Output, n tf.Output, component_types []tf.DataType, optional ...QueueDequeueManyV2Attr) (components []tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
+ attrs := map[string]interface{}{"component_types": component_types}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "MaxPool",
+ Type: "QueueDequeueManyV2",
Input: []tf.Input{
- input,
+ handle, n,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// Fast Fourier transform.
-//
-// Computes the 1-dimensional discrete Fourier transform over the inner-most
-// dimension of `input`.
-//
-// Arguments:
-// input: A complex64 tensor.
-//
-// Returns A complex64 tensor of the same shape as `input`. The inner-most
-// dimension of `input` is replaced with its 1D Fourier transform.
-//
-// @compatibility(numpy)
-// Equivalent to np.fft.fft
-// @end_compatibility
-func FFT(scope *Scope, input tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
- opspec := tf.OpSpec{
- Type: "FFT",
- Input: []tf.Input{
- input,
- },
+ var idx int
+ var err error
+ if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
+ scope.UpdateErr("QueueDequeueManyV2", err)
+ return
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return components
}
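A round-trip sketch (same assumed imports) pairing QueueEnqueueV2 with QueueDequeueManyV2 on a FIFO queue:

```go
s := op.NewScope()
q := op.FIFOQueueV2(s, []tf.DataType{tf.Float})
enqueue := op.QueueEnqueueV2(s, q, []tf.Output{op.Const(s, float32(3.14))})
dequeued := op.QueueDequeueManyV2(s, q, op.Const(s, int32(1)),
	[]tf.DataType{tf.Float})
_, _ = enqueue, dequeued // run the enqueue before the dequeue to avoid blocking
```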
-// MaxPoolWithArgmaxAttr is an optional argument to MaxPoolWithArgmax.
-type MaxPoolWithArgmaxAttr func(optionalAttr)
+// EncodeBase64Attr is an optional argument to EncodeBase64.
+type EncodeBase64Attr func(optionalAttr)
-// MaxPoolWithArgmaxTargmax sets the optional Targmax attribute to value.
-// If not specified, defaults to DT_INT64
-func MaxPoolWithArgmaxTargmax(value tf.DataType) MaxPoolWithArgmaxAttr {
+// EncodeBase64Pad sets the optional pad attribute to value.
+//
+// value: Bool whether padding is applied at the ends.
+// If not specified, defaults to false
+func EncodeBase64Pad(value bool) EncodeBase64Attr {
return func(m optionalAttr) {
- m["Targmax"] = value
+ m["pad"] = value
}
}
-// Performs max pooling on the input and outputs both max values and indices.
+// Encode strings into web-safe base64 format.
//
-// The indices in `argmax` are flattened, so that a maximum value at position
-// `[b, y, x, c]` becomes flattened index
-// `((b * height + y) * width + x) * channels + c`.
+// Refer to the following article for more information on base64 format:
+// en.wikipedia.org/wiki/Base64. Base64 strings may have padding with '=' at the
+// end so that the encoded value has a length that is a multiple of 4. See the
+// Padding section of the link above.
//
-// The indices returned are always in `[0, height) x [0, width)` before flattening,
-// even if padding is involved and the mathematically correct answer is outside
-// (either negative or too large). This is a bug, but fixing it is difficult to do
-// in a safe backwards compatible way, especially due to flattening.
+// Web-safe means that the encoder uses - and _ instead of + and /.
//
// Arguments:
-// input: 4-D with shape `[batch, height, width, channels]`. Input to pool over.
-// ksize: The size of the window for each dimension of the input tensor.
-// strides: The stride of the sliding window for each dimension of the
-// input tensor.
-// padding: The type of padding algorithm to use.
+// input: Strings to be encoded.
//
-// Returns The max pooled output tensor.4-D. The flattened indices of the max values chosen for each output.
-func MaxPoolWithArgmax(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolWithArgmaxAttr) (output tf.Output, argmax tf.Output) {
+// Returns Input strings encoded in base64.
+func EncodeBase64(scope *Scope, input tf.Output, optional ...EncodeBase64Attr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
+ attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "MaxPoolWithArgmax",
+ Type: "EncodeBase64",
Input: []tf.Input{
input,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1)
+ return op.Output(0)
}
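A one-liner sketch (same assumed imports):

```go
s := op.NewScope()
in := op.Const(s, []string{"hello world"})
enc := op.EncodeBase64(s, in, op.EncodeBase64Pad(true))
_ = enc // "aGVsbG8gd29ybGQ=" once run
```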
-// MaxPoolGradGradV2Attr is an optional argument to MaxPoolGradGradV2.
-type MaxPoolGradGradV2Attr func(optionalAttr)
+// Deprecated. Use TensorArrayCloseV3
+//
+// Returns the created operation.
+func TensorArrayCloseV2(scope *Scope, handle tf.Output) (o *tf.Operation) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "TensorArrayCloseV2",
+ Input: []tf.Input{
+ handle,
+ },
+ }
+ return scope.AddOperation(opspec)
+}
-// MaxPoolGradGradV2DataFormat sets the optional data_format attribute to value.
+// QueueDequeueUpToV2Attr is an optional argument to QueueDequeueUpToV2.
+type QueueDequeueUpToV2Attr func(optionalAttr)
+
+// QueueDequeueUpToV2TimeoutMs sets the optional timeout_ms attribute to value.
//
-// value: Specify the data format of the input and output data. With the
-// default format "NHWC", the data is stored in the order of:
-// [batch, in_height, in_width, in_channels].
-// Alternatively, the format could be "NCHW", the data storage order of:
-// [batch, in_channels, in_height, in_width].
-// If not specified, defaults to "NHWC"
-func MaxPoolGradGradV2DataFormat(value string) MaxPoolGradGradV2Attr {
+// value: If the queue has fewer than n elements, this operation
+// will block for up to timeout_ms milliseconds.
+// Note: This option is not supported yet.
+// If not specified, defaults to -1
+func QueueDequeueUpToV2TimeoutMs(value int64) QueueDequeueUpToV2Attr {
return func(m optionalAttr) {
- m["data_format"] = value
+ m["timeout_ms"] = value
}
}
-// Computes second-order gradients of the maxpooling function.
+// Dequeues `n` tuples of one or more tensors from the given queue.
+//
+// This operation is not supported by all queues. If a queue does not support
+// DequeueUpTo, then an Unimplemented error is returned.
+//
+// If the queue is closed and there are more than 0 but less than `n`
+// elements remaining, then instead of returning an OutOfRange error like
+// QueueDequeueMany, less than `n` elements are returned immediately. If
+// the queue is closed and there are 0 elements left in the queue, then
+// an OutOfRange error is returned just like in QueueDequeueMany.
+// Otherwise the behavior is identical to QueueDequeueMany:
+//
+// This operation concatenates queue-element component tensors along the
+// 0th dimension to make a single component tensor. All of the components
+// in the dequeued tuple will have size n in the 0th dimension.
+//
+// This operation has `k` outputs, where `k` is the number of components in
+// the tuples stored in the given queue, and output `i` is the ith
+// component of the dequeued tuple.
//
// Arguments:
-// orig_input: The original input tensor.
-// orig_output: The original output tensor.
-// grad: 4-D. Gradients of gradients w.r.t. the input of `max_pool`.
-// ksize: The size of the window for each dimension of the input tensor.
-// strides: The stride of the sliding window for each dimension of the
-// input tensor.
-// padding: The type of padding algorithm to use.
+// handle: The handle to a queue.
+// n: The number of tuples to dequeue.
+// component_types: The type of each component in a tuple.
//
-// Returns Gradients of gradients w.r.t. the input to `max_pool`.
-func MaxPoolGradGradV2(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize tf.Output, strides tf.Output, padding string, optional ...MaxPoolGradGradV2Attr) (output tf.Output) {
+// Returns One or more tensors that were dequeued as a tuple.
+func QueueDequeueUpToV2(scope *Scope, handle tf.Output, n tf.Output, component_types []tf.DataType, optional ...QueueDequeueUpToV2Attr) (components []tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"padding": padding}
+ attrs := map[string]interface{}{"component_types": component_types}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "MaxPoolGradGradV2",
+ Type: "QueueDequeueUpToV2",
Input: []tf.Input{
- orig_input, orig_output, grad, ksize, strides,
+ handle, n,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ if scope.Err() != nil {
+ return
+ }
+ var idx int
+ var err error
+ if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
+ scope.UpdateErr("QueueDequeueUpToV2", err)
+ return
+ }
+ return components
}
-// Computes second-order gradients of the maxpooling function.
-//
-// Arguments:
-// input: The original input.
-// grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the
-// input of `max_pool`.
-// argmax: The indices of the maximum values chosen for each output of `max_pool`.
-// ksize: The size of the window for each dimension of the input tensor.
-// strides: The stride of the sliding window for each dimension of the
-// input tensor.
-// padding: The type of padding algorithm to use.
-//
-// Returns Gradients of gradients w.r.t. the input of `max_pool`.
-func MaxPoolGradGradWithArgmax(scope *Scope, input tf.Output, grad tf.Output, argmax tf.Output, ksize []int64, strides []int64, padding string) (output tf.Output) {
+// Computes inverse hyperbolic tangent of x element-wise.
+func Atanh(scope *Scope, x tf.Output) (y tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
opspec := tf.OpSpec{
- Type: "MaxPoolGradGradWithArgmax",
+ Type: "Atanh",
Input: []tf.Input{
- input, grad, argmax,
+ x,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Compute the polygamma function \\(\psi^{(n)}(x)\\).
-//
-// The polygamma function is defined as:
-//
+// Returns true if queue is closed.
//
-// \\(\psi^{(n)}(x) = \frac{d^n}{dx^n} \psi(x)\\)
+// This operation returns true if the queue is closed and false if the queue
+// is open.
//
-// where \\(\psi(x)\\) is the digamma function.
-func Polygamma(scope *Scope, a tf.Output, x tf.Output) (z tf.Output) {
+// Arguments:
+// handle: The handle to a queue.
+func QueueIsClosedV2(scope *Scope, handle tf.Output) (is_closed tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Polygamma",
+ Type: "QueueIsClosedV2",
Input: []tf.Input{
- a, x,
+ handle,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors.
+// Returns the batched diagonal part of a batched tensor.
//
-// The `input` tensor has shape `[batch, in_height, in_width, depth]` and the
-// `filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each
-// input channel is processed independently of the others with its own structuring
-// function. The `output` tensor has shape
-// `[batch, out_height, out_width, depth]`. The spatial dimensions of the output
-// tensor depend on the `padding` algorithm. We currently only support the default
-// "NHWC" `data_format`.
+// This operation returns a tensor with the `diagonal` part
+// of the batched `input`. The `diagonal` part is computed as follows:
//
-// In detail, the grayscale morphological 2-D dilation is the max-sum correlation
-// (for consistency with `conv2d`, we use unmirrored filters):
+// Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
+// tensor of rank `k - 1` with dimensions `[I, J, K, ..., min(M, N)]` where:
//
-// output[b, y, x, c] =
-// max_{dy, dx} input[b,
-// strides[1] * y + rates[1] * dy,
-// strides[2] * x + rates[2] * dx,
-// c] +
-// filter[dy, dx, c]
+// `diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`.
//
-// Max-pooling is a special case when the filter has size equal to the pooling
-// kernel size and contains all zeros.
+// The input must be at least a matrix.
//
-// Note on duality: The dilation of `input` by the `filter` is equal to the
-// negation of the erosion of `-input` by the reflected `filter`.
+// For example:
+//
+// ```
+// # 'input' is [[[1, 0, 0, 0]
+// [0, 2, 0, 0]
+// [0, 0, 3, 0]
+// [0, 0, 0, 4]],
+// [[5, 0, 0, 0]
+// [0, 6, 0, 0]
+// [0, 0, 7, 0]
+// [0, 0, 0, 8]]]
+//
+// and input.shape = (2, 4, 4)
+//
+// tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]]
+//
+// which has shape (2, 4)
+// ```
//
// Arguments:
-// input: 4-D with shape `[batch, in_height, in_width, depth]`.
-// filter: 3-D with shape `[filter_height, filter_width, depth]`.
-// strides: The stride of the sliding window for each dimension of the input
-// tensor. Must be: `[1, stride_height, stride_width, 1]`.
-// rates: The input stride for atrous morphological dilation. Must be:
-// `[1, rate_height, rate_width, 1]`.
-// padding: The type of padding algorithm to use.
+// input: Rank `k` tensor where `k >= 2`.
//
-// Returns 4-D with shape `[batch, out_height, out_width, depth]`.
-func Dilation2D(scope *Scope, input tf.Output, filter tf.Output, strides []int64, rates []int64, padding string) (output tf.Output) {
+// Returns The extracted diagonal(s) having shape
+// `diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]`.
+func MatrixDiagPart(scope *Scope, input tf.Output) (diagonal tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"strides": strides, "rates": rates, "padding": padding}
opspec := tf.OpSpec{
- Type: "Dilation2D",
+ Type: "MatrixDiagPart",
Input: []tf.Input{
- input, filter,
+ input,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
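A small sketch mirroring the example in the comment above; it only builds the graph, and the expected result is noted in a comment:

```go
package main

import (
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// Shape (2, 4, 4): a batch of two 4x4 diagonal matrices.
	input := op.Const(s, [][][]int32{
		{{1, 0, 0, 0}, {0, 2, 0, 0}, {0, 0, 3, 0}, {0, 0, 0, 4}},
		{{5, 0, 0, 0}, {0, 6, 0, 0}, {0, 0, 7, 0}, {0, 0, 0, 8}},
	})
	// Evaluating diag in a session yields [[1 2 3 4] [5 6 7 8]], shape (2, 4).
	diag := op.MatrixDiagPart(s, input)
	_ = diag
	if _, err := s.Finalize(); err != nil {
		panic(err)
	}
}
```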
-// AudioSpectrogramAttr is an optional argument to AudioSpectrogram.
-type AudioSpectrogramAttr func(optionalAttr)
+// Computes the absolute value of a tensor.
+//
+// Given a tensor `x`, this operation returns a tensor containing the absolute
+// value of each element in `x`. For example, if x is an input element and y is
+// an output element, this operation computes \\(y = |x|\\).
+func Abs(scope *Scope, x tf.Output) (y tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "Abs",
+ Input: []tf.Input{
+ x,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
-// AudioSpectrogramMagnitudeSquared sets the optional magnitude_squared attribute to value.
+// OrderedMapStageAttr is an optional argument to OrderedMapStage.
+type OrderedMapStageAttr func(optionalAttr)
+
+// OrderedMapStageCapacity sets the optional capacity attribute to value.
//
-// value: Whether to return the squared magnitude or just the
-// magnitude. Using squared magnitude can avoid extra calculations.
-// If not specified, defaults to false
-func AudioSpectrogramMagnitudeSquared(value bool) AudioSpectrogramAttr {
+// value: Maximum number of elements in the Staging Area. If > 0, inserts
+// on the container will block when the capacity is reached.
+// If not specified, defaults to 0
+//
+// REQUIRES: value >= 0
+func OrderedMapStageCapacity(value int64) OrderedMapStageAttr {
return func(m optionalAttr) {
- m["magnitude_squared"] = value
+ m["capacity"] = value
}
}
-// Produces a visualization of audio data over time.
-//
-// Spectrograms are a standard way of representing audio information as a series of
-// slices of frequency information, one slice for each window of time. By joining
-// these together into a sequence, they form a distinctive fingerprint of the sound
-// over time.
+// OrderedMapStageMemoryLimit sets the optional memory_limit attribute to value.
+// If not specified, defaults to 0
//
-// This op expects to receive audio data as an input, stored as floats in the range
-// -1 to 1, together with a window width in samples, and a stride specifying how
-// far to move the window between slices. From this it generates a three
-// dimensional output. The lowest dimension has an amplitude value for each
-// frequency during that time slice. The next dimension is time, with successive
-// frequency slices. The final dimension is for the channels in the input, so a
-// stereo audio input would have two here for example.
+// REQUIRES: value >= 0
+func OrderedMapStageMemoryLimit(value int64) OrderedMapStageAttr {
+ return func(m optionalAttr) {
+ m["memory_limit"] = value
+ }
+}
+
+// OrderedMapStageContainer sets the optional container attribute to value.
//
-// This means the layout when converted and saved as an image is rotated 90 degrees
-// clockwise from a typical spectrogram. Time is descending down the Y axis, and
-// the frequency decreases from left to right.
+// value: If non-empty, this queue is placed in the given container. Otherwise,
+// a default container is used.
+// If not specified, defaults to ""
+func OrderedMapStageContainer(value string) OrderedMapStageAttr {
+ return func(m optionalAttr) {
+ m["container"] = value
+ }
+}
+
+// OrderedMapStageSharedName sets the optional shared_name attribute to value.
//
-// Each value in the result represents the square root of the sum of the real and
-// imaginary parts of an FFT on the current window of samples. In this way, the
-// lowest dimension represents the power of each frequency in the current window,
-// and adjacent windows are concatenated in the next dimension.
+// value: It is necessary to match this name to the matching Unstage Op.
+// If not specified, defaults to ""
+func OrderedMapStageSharedName(value string) OrderedMapStageAttr {
+ return func(m optionalAttr) {
+ m["shared_name"] = value
+ }
+}
+
+// Stage (key, values) in the underlying container which behaves like an ordered
//
-// To get a more intuitive and visual look at what this operation does, you can run
-// tensorflow/examples/wav_to_spectrogram to read in an audio file and save out the
-// resulting spectrogram as a PNG image.
+// associative container. Elements are ordered by key.
//
// Arguments:
-// input: Float representation of audio data.
-// window_size: How wide the input window is in samples. For the highest efficiency
-// this should be a power of two, but other values are accepted.
-// stride: How widely apart the center of adjacent sample windows should be.
+// key: int64
//
-// Returns 3D representation of the audio frequencies as an image.
-func AudioSpectrogram(scope *Scope, input tf.Output, window_size int64, stride int64, optional ...AudioSpectrogramAttr) (spectrogram tf.Output) {
+// values: A list of tensors.
+// dtypes: A list of data types that inserted values should adhere to.
+//
+//
+// Returns the created operation.
+func OrderedMapStage(scope *Scope, key tf.Output, indices tf.Output, values []tf.Output, dtypes []tf.DataType, optional ...OrderedMapStageAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"window_size": window_size, "stride": stride}
+ attrs := map[string]interface{}{"dtypes": dtypes}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "AudioSpectrogram",
+ Type: "OrderedMapStage",
Input: []tf.Input{
- input,
+ key, indices, tf.OutputList(values),
},
Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
-// Computes the gradient of morphological 2-D dilation with respect to the input.
+// StackPushV2Attr is an optional argument to StackPushV2.
+type StackPushV2Attr func(optionalAttr)
+
+// StackPushV2SwapMemory sets the optional swap_memory attribute to value.
+//
+// value: Swap `elem` to CPU. Defaults to false.
+// If not specified, defaults to false
+func StackPushV2SwapMemory(value bool) StackPushV2Attr {
+ return func(m optionalAttr) {
+ m["swap_memory"] = value
+ }
+}
+
+// Push an element onto the stack.
//
// Arguments:
-// input: 4-D with shape `[batch, in_height, in_width, depth]`.
-// filter: 3-D with shape `[filter_height, filter_width, depth]`.
-// out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`.
-// strides: 1-D of length 4. The stride of the sliding window for each dimension of
-// the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
-// rates: 1-D of length 4. The input stride for atrous morphological dilation.
-// Must be: `[1, rate_height, rate_width, 1]`.
-// padding: The type of padding algorithm to use.
+// handle: The handle to a stack.
+// elem: The tensor to be pushed onto the stack.
//
-// Returns 4-D with shape `[batch, in_height, in_width, depth]`.
-func Dilation2DBackpropInput(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, rates []int64, padding string) (in_backprop tf.Output) {
+// Returns The same tensor as the input 'elem'.
+func StackPushV2(scope *Scope, handle tf.Output, elem tf.Output, optional ...StackPushV2Attr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"strides": strides, "rates": rates, "padding": padding}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "Dilation2DBackpropInput",
+ Type: "StackPushV2",
Input: []tf.Input{
- input, filter, out_backprop,
+ handle, elem,
},
Attrs: attrs,
}
@@ -22795,272 +22469,333 @@ func Dilation2DBackpropInput(scope *Scope, input tf.Output, filter tf.Output, ou
return op.Output(0)
}
-// Returns the truth value of (x == y) element-wise.
+// FusedBatchNormGradV2Attr is an optional argument to FusedBatchNormGradV2.
+type FusedBatchNormGradV2Attr func(optionalAttr)
+
+// FusedBatchNormGradV2Epsilon sets the optional epsilon attribute to value.
//
-// *NOTE*: `Equal` supports broadcasting. More about broadcasting
-// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-func Equal(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "Equal",
- Input: []tf.Input{
- x, y,
- },
+// value: A small float number added to the variance of x.
+// If not specified, defaults to 0.0001
+func FusedBatchNormGradV2Epsilon(value float32) FusedBatchNormGradV2Attr {
+ return func(m optionalAttr) {
+ m["epsilon"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// Computes the gradient of morphological 2-D dilation with respect to the filter.
-//
-// Arguments:
-// input: 4-D with shape `[batch, in_height, in_width, depth]`.
-// filter: 3-D with shape `[filter_height, filter_width, depth]`.
-// out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`.
-// strides: 1-D of length 4. The stride of the sliding window for each dimension of
-// the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
-// rates: 1-D of length 4. The input stride for atrous morphological dilation.
-// Must be: `[1, rate_height, rate_width, 1]`.
-// padding: The type of padding algorithm to use.
+// FusedBatchNormGradV2DataFormat sets the optional data_format attribute to value.
//
-// Returns 3-D with shape `[filter_height, filter_width, depth]`.
-func Dilation2DBackpropFilter(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, rates []int64, padding string) (filter_backprop tf.Output) {
- if scope.Err() != nil {
- return
+// value: The data format for y_backprop, x, x_backprop.
+// Either "NHWC" (default) or "NCHW".
+// If not specified, defaults to "NHWC"
+func FusedBatchNormGradV2DataFormat(value string) FusedBatchNormGradV2Attr {
+ return func(m optionalAttr) {
+ m["data_format"] = value
}
- attrs := map[string]interface{}{"strides": strides, "rates": rates, "padding": padding}
- opspec := tf.OpSpec{
- Type: "Dilation2DBackpropFilter",
- Input: []tf.Input{
- input, filter, out_backprop,
- },
- Attrs: attrs,
+}
+
+// FusedBatchNormGradV2IsTraining sets the optional is_training attribute to value.
+//
+// value: A bool value to indicate the operation is for training (default)
+// or inference.
+// If not specified, defaults to true
+func FusedBatchNormGradV2IsTraining(value bool) FusedBatchNormGradV2Attr {
+ return func(m optionalAttr) {
+ m["is_training"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// Computes rectified linear gradients for a Relu operation.
+// Gradient for batch normalization.
+//
+// Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
+// The size of 1D Tensors matches the dimension C of the 4D Tensors.
//
// Arguments:
-// gradients: The backpropagated gradients to the corresponding Relu operation.
-// features: The features passed as input to the corresponding Relu operation, OR
-// the outputs of that operation (both work equivalently).
+// y_backprop: A 4D Tensor for the gradient with respect to y.
+// x: A 4D Tensor for input data.
+// scale: A 1D Tensor for scaling factor, to scale the normalized x.
+// reserve_space_1: When is_training is True, a 1D Tensor for the computed batch
+// mean to be reused in gradient computation. When is_training is
+// False, a 1D Tensor for the population mean to be reused in both
+// 1st and 2nd order gradient computation.
+// reserve_space_2: When is_training is True, a 1D Tensor for the computed batch
+// variance (inverted variance in the cuDNN case) to be reused in
+// gradient computation. When is_training is False, a 1D Tensor
+// for the population variance to be reused in both 1st and 2nd
+// order gradient computation.
//
-// Returns `gradients * (features > 0)`.
-func ReluGrad(scope *Scope, gradients tf.Output, features tf.Output) (backprops tf.Output) {
+// Returns A 4D Tensor for the gradient with respect to x; a 1D Tensor for the
+// gradient with respect to scale; a 1D Tensor for the gradient with respect to
+// offset; an unused placeholder to match the mean input in FusedBatchNorm; and
+// an unused placeholder to match the variance input in FusedBatchNorm.
+func FusedBatchNormGradV2(scope *Scope, y_backprop tf.Output, x tf.Output, scale tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output, optional ...FusedBatchNormGradV2Attr) (x_backprop tf.Output, scale_backprop tf.Output, offset_backprop tf.Output, reserve_space_3 tf.Output, reserve_space_4 tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "ReluGrad",
+ Type: "FusedBatchNormGradV2",
Input: []tf.Input{
- gradients, features,
+ y_backprop, x, scale, reserve_space_1, reserve_space_2,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
}
-// Computes rectified linear 6: `min(max(features, 0), 6)`.
-func Relu6(scope *Scope, features tf.Output) (activations tf.Output) {
+// Creates a TensorArray for storing the gradients of values in the given handle.
+//
+// If the given TensorArray gradient already exists, returns a reference to it.
+//
+// Locks the size of the original TensorArray by disabling its dynamic size flag.
+//
+// **A note about the input flow_in:**
+//
+// The handle flow_in forces the execution of the gradient lookup to occur
+// only after certain other operations have occurred. For example, when
+// the forward TensorArray is dynamically sized, writes to this TensorArray
+// may resize the object. The gradient TensorArray is statically sized based
+// on the size of the forward TensorArray when this operation executes.
+// Furthermore, the size of the forward TensorArray is frozen by this call.
+// As a result, the flow is used to ensure that the call to generate the gradient
+// TensorArray only happens after all writes are executed.
+//
+// In the case of dynamically sized TensorArrays, gradient computation should
+// only be performed on read operations that have themselves been chained via
+// flow to occur only after all writes have executed. That way the final size
+// of the forward TensorArray is known when this operation is called.
+//
+// **A note about the source attribute:**
+//
+// TensorArray gradient calls use an accumulator TensorArray object. If
+// multiple gradients are calculated and run in the same session, the multiple
+// gradient nodes may accidentally flow through the same accumulator TensorArray.
+// This double counts and generally breaks the TensorArray gradient flow.
+//
+// The solution is to identify which gradient call this particular
+// TensorArray gradient is being called in. This is performed by identifying
+// a unique string (e.g. "gradients", "gradients_1", ...) from the input
+// gradient Tensor's name. This string is used as a suffix when creating
+// the TensorArray gradient object here (the attribute `source`).
+//
+// The attribute `source` is added as a suffix to the forward TensorArray's
+// name when performing the creation / lookup, so that each separate gradient
+// calculation gets its own TensorArray accumulator.
+//
+// Arguments:
+// handle: The handle to the forward TensorArray.
+// flow_in: A float scalar that enforces proper chaining of operations.
+// source: The gradient source string, used to decide which gradient TensorArray
+// to return.
+func TensorArrayGradV3(scope *Scope, handle tf.Output, flow_in tf.Output, source string) (grad_handle tf.Output, flow_out tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"source": source}
opspec := tf.OpSpec{
- Type: "Relu6",
+ Type: "TensorArrayGradV3",
Input: []tf.Input{
- features,
+ handle, flow_in,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1)
}
-// Creates a dataset that contains `count` elements from the `input_dataset`.
+// Compare values of `input` to `threshold` and pack resulting bits into a `uint8`.
//
-// Arguments:
+// Each comparison returns a boolean `true` (if `input_value > threshold`)
+// and `false` otherwise.
//
-// count: A scalar representing the number of elements from the `input_dataset`
-// that should be taken. A value of `-1` indicates that all of `input_dataset`
-// is taken.
+// This operation is useful for Locality-Sensitive-Hashing (LSH) and other
+// algorithms that use hashing approximations of cosine and `L2` distances;
+// codes can be generated from an input via:
+//
+// ```python
+// codebook_size = 50
+// codebook_bits = codebook_size * 32
+// codebook = tf.get_variable('codebook', [x.shape[-1].value, codebook_bits],
+// dtype=x.dtype,
+// initializer=tf.orthogonal_initializer())
+// codes = compare_and_threshold(tf.matmul(x, codebook), threshold=0.)
+// codes = tf.bitcast(codes, tf.int32) # go from uint8 to int32
+// # now codes has shape x.shape[:-1] + [codebook_size]
+// ```
//
+// **NOTE**: Currently, the innermost dimension of the tensor must be divisible
+// by 8.
//
-func TakeDataset(scope *Scope, input_dataset tf.Output, count tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
+// Given an `input` shaped `[s0, s1, ..., s_n]`, the output is
+// a `uint8` tensor shaped `[s0, s1, ..., s_n / 8]`.
+//
+// Arguments:
+// input: Values to compare against `threshold` and bitpack.
+// threshold: Threshold to compare against.
+//
+// Returns The bitpacked comparisons.
+func CompareAndBitpack(scope *Scope, input tf.Output, threshold tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
opspec := tf.OpSpec{
- Type: "TakeDataset",
+ Type: "CompareAndBitpack",
Input: []tf.Input{
- input_dataset, count,
+ input, threshold,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
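A short graph-construction sketch; the expected byte value in the comment assumes the first comparison maps to the most significant bit of each packed byte, which is an assumption rather than something stated in this hunk:

```go
package main

import (
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// The innermost dimension must be divisible by 8; these 8 comparisons
	// pack into a single uint8.
	input := op.Const(s, []float32{-1, 0.5, 2, -3, 4, -5, 6, 7})
	threshold := op.Const(s, float32(0))
	// Evaluating packed yields one byte per group of 8 comparisons
	// (107 == 0b01101011 here, if the first comparison lands in the
	// most significant bit).
	packed := op.CompareAndBitpack(s, input, threshold)
	_ = packed
	if _, err := s.Finalize(); err != nil {
		panic(err)
	}
}
```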
-// Converts each string in the input Tensor to its hash mod by a number of buckets.
-//
-// The hash function is deterministic on the content of the string within the
-// process.
-//
-// Note that the hash function may change from time to time.
-// This functionality will be deprecated and it's recommended to use
-// `tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`.
+// Push an element onto the tensor_array.
//
// Arguments:
+// handle: The handle to a TensorArray.
+// index: The position to write to inside the TensorArray.
+// value: The tensor to write to the TensorArray.
+// flow_in: A float scalar that enforces proper chaining of operations.
//
-// num_buckets: The number of buckets.
-//
-// Returns A Tensor of the same shape as the input `string_tensor`.
-func StringToHashBucket(scope *Scope, string_tensor tf.Output, num_buckets int64) (output tf.Output) {
+// Returns A float scalar that enforces proper chaining of operations.
+func TensorArrayWriteV3(scope *Scope, handle tf.Output, index tf.Output, value tf.Output, flow_in tf.Output) (flow_out tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"num_buckets": num_buckets}
opspec := tf.OpSpec{
- Type: "StringToHashBucket",
+ Type: "TensorArrayWriteV3",
Input: []tf.Input{
- string_tensor,
+ handle, index, value, flow_in,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
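A runnable sketch of the write/read flow-chaining pattern; `TensorArrayV3` and `TensorArrayReadV3` are assumed from elsewhere in this package:

```go
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	handle, flow := op.TensorArrayV3(s, op.Const(s, int32(2)), tf.Float)
	// Thread the flow scalar through each write so the read below is
	// ordered after both writes.
	flow = op.TensorArrayWriteV3(s, handle, op.Const(s, int32(0)), op.Const(s, []float32{1, 2}), flow)
	flow = op.TensorArrayWriteV3(s, handle, op.Const(s, int32(1)), op.Const(s, []float32{3, 4}), flow)
	read := op.TensorArrayReadV3(s, handle, op.Const(s, int32(1)), flow, tf.Float)

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()

	out, err := sess.Run(nil, []tf.Output{read}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].Value()) // [3 4]
}
```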
-// Computes gradients for the exponential linear (Elu) operation.
+// Scatter the data from the input value into specific TensorArray elements.
+//
+// `indices` must be a vector, its length must match the first dim of `value`.
//
// Arguments:
-// gradients: The backpropagated gradients to the corresponding Elu operation.
-// outputs: The outputs of the corresponding Elu operation.
+// handle: The handle to a TensorArray.
+// indices: The locations at which to write the tensor elements.
+// value: The concatenated tensor to write to the TensorArray.
+// flow_in: A float scalar that enforces proper chaining of operations.
//
-// Returns The gradients: `gradients * (outputs + 1)` if outputs < 0,
-// `gradients` otherwise.
-func EluGrad(scope *Scope, gradients tf.Output, outputs tf.Output) (backprops tf.Output) {
+// Returns A float scalar that enforces proper chaining of operations.
+func TensorArrayScatterV3(scope *Scope, handle tf.Output, indices tf.Output, value tf.Output, flow_in tf.Output) (flow_out tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "EluGrad",
+ Type: "TensorArrayScatterV3",
Input: []tf.Input{
- gradients, outputs,
+ handle, indices, value, flow_in,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// FractionalAvgPoolGradAttr is an optional argument to FractionalAvgPoolGrad.
-type FractionalAvgPoolGradAttr func(optionalAttr)
+// TensorArrayConcatV3Attr is an optional argument to TensorArrayConcatV3.
+type TensorArrayConcatV3Attr func(optionalAttr)
-// FractionalAvgPoolGradOverlapping sets the optional overlapping attribute to value.
-//
-// value: When set to True, it means when pooling, the values at the boundary
-// of adjacent pooling cells are used by both cells. For example:
-//
-// `index 0 1 2 3 4`
-//
-// `value 20 5 16 3 7`
+// TensorArrayConcatV3ElementShapeExcept0 sets the optional element_shape_except0 attribute to value.
//
-// If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.
-// The result would be [41/3, 26/3] for fractional avg pooling.
-// If not specified, defaults to false
-func FractionalAvgPoolGradOverlapping(value bool) FractionalAvgPoolGradAttr {
+// value: The expected shape of an element, if known,
+// excluding the first dimension. Used to validate the shapes of
+// TensorArray elements. If this shape is not fully specified, concatenating
+// zero-size TensorArrays is an error.
+// If not specified, defaults to <unknown_rank:true >
+func TensorArrayConcatV3ElementShapeExcept0(value tf.Shape) TensorArrayConcatV3Attr {
return func(m optionalAttr) {
- m["overlapping"] = value
+ m["element_shape_except0"] = value
}
}
-// Computes gradient of the FractionalAvgPool function.
+// Concat the elements from the TensorArray into value `value`.
//
-// Unlike FractionalMaxPoolGrad, we don't need to find arg_max for
-// FractionalAvgPoolGrad, we just need to evenly back-propagate each element of
-// out_backprop to those indices that form the same pooling cell. Therefore, we
-// just need to know the shape of original input tensor, instead of the whole
-// tensor.
+// Takes `T` elements of shapes
+//
+// ```
+// (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...)
+// ```
+//
+// and concatenates them into a Tensor of shape:
+//
+// ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```
+//
+// All elements must have the same shape (excepting the first dimension).
//
// Arguments:
-// orig_input_tensor_shape: Original input tensor shape for `fractional_avg_pool`
-// out_backprop: 4-D with shape `[batch, height, width, channels]`. Gradients
-// w.r.t. the output of `fractional_avg_pool`.
-// row_pooling_sequence: row pooling sequence, form pooling region with
-// col_pooling_sequence.
-// col_pooling_sequence: column pooling sequence, form pooling region with
-// row_pooling sequence.
+// handle: The handle to a TensorArray.
+// flow_in: A float scalar that enforces proper chaining of operations.
+// dtype: The type of the elem that is returned.
//
-// Returns 4-D. Gradients w.r.t. the input of `fractional_avg_pool`.
-func FractionalAvgPoolGrad(scope *Scope, orig_input_tensor_shape tf.Output, out_backprop tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output, optional ...FractionalAvgPoolGradAttr) (output tf.Output) {
+// Returns All of the elements in the TensorArray, concatenated along the first
+// axis, and a vector of the row sizes of the original T elements in the
+// value output. In the example above, this would be the values:
+// `(n0, n1, ..., n(T-1))`.
+func TensorArrayConcatV3(scope *Scope, handle tf.Output, flow_in tf.Output, dtype tf.DataType, optional ...TensorArrayConcatV3Attr) (value tf.Output, lengths tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"dtype": dtype}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "FractionalAvgPoolGrad",
+ Type: "TensorArrayConcatV3",
Input: []tf.Input{
- orig_input_tensor_shape, out_backprop, row_pooling_sequence, col_pooling_sequence,
+ handle, flow_in,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1)
}
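A construction-only sketch of the concat pattern, again assuming the `TensorArrayV3` and `TensorArrayWriteV3` wrappers from this package; the expected outputs are noted in a comment:

```go
package main

import (
	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	handle, flow := op.TensorArrayV3(s, op.Const(s, int32(2)), tf.Float)
	flow = op.TensorArrayWriteV3(s, handle, op.Const(s, int32(0)), op.Const(s, []float32{1, 2}), flow)
	flow = op.TensorArrayWriteV3(s, handle, op.Const(s, int32(1)), op.Const(s, []float32{3, 4}), flow)
	// Evaluating these yields value = [1 2 3 4] and lengths = [2 2].
	value, lengths := op.TensorArrayConcatV3(s, handle, flow, tf.Float)
	_, _ = value, lengths
	if _, err := s.Finalize(); err != nil {
		panic(err)
	}
}
```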
-// Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)`
-//
-// if < 0, `scale * features` otherwise.
+// ParameterizedTruncatedNormalAttr is an optional argument to ParameterizedTruncatedNormal.
+type ParameterizedTruncatedNormalAttr func(optionalAttr)
+
+// ParameterizedTruncatedNormalSeed sets the optional seed attribute to value.
//
-// See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
-func Selu(scope *Scope, features tf.Output) (activations tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "Selu",
- Input: []tf.Input{
- features,
- },
+// value: If either `seed` or `seed2` is set to be non-zero, the random number
+// generator is seeded by the given seed. Otherwise, it is seeded by a
+// random seed.
+// If not specified, defaults to 0
+func ParameterizedTruncatedNormalSeed(value int64) ParameterizedTruncatedNormalAttr {
+ return func(m optionalAttr) {
+ m["seed"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// ResourceSparseApplyAdadeltaAttr is an optional argument to ResourceSparseApplyAdadelta.
-type ResourceSparseApplyAdadeltaAttr func(optionalAttr)
-
-// ResourceSparseApplyAdadeltaUseLocking sets the optional use_locking attribute to value.
+// ParameterizedTruncatedNormalSeed2 sets the optional seed2 attribute to value.
//
-// value: If True, updating of the var and accum tensors will be protected by
-// a lock; otherwise the behavior is undefined, but may exhibit less contention.
-// If not specified, defaults to false
-func ResourceSparseApplyAdadeltaUseLocking(value bool) ResourceSparseApplyAdadeltaAttr {
+// value: A second seed to avoid seed collision.
+// If not specified, defaults to 0
+func ParameterizedTruncatedNormalSeed2(value int64) ParameterizedTruncatedNormalAttr {
return func(m optionalAttr) {
- m["use_locking"] = value
+ m["seed2"] = value
}
}
-// var: Should be from a Variable().
+// Outputs random values from a truncated normal distribution. The parameters may each be a
//
-// Arguments:
+// scalar which applies to the entire output, or a vector of length shape[0] which
+// stores the parameters for each batch.
//
-// accum: Should be from a Variable().
-// accum_update: : Should be from a Variable().
-// lr: Learning rate. Must be a scalar.
-// rho: Decay factor. Must be a scalar.
-// epsilon: Constant factor. Must be a scalar.
-// grad: The gradient.
-// indices: A vector of indices into the first dimension of var and accum.
+// Arguments:
+// shape: The shape of the output tensor. Batches are indexed by the 0th dimension.
+// means: The mean parameter of each batch.
+// stdevs: The standard deviation parameter of each batch. Must be greater than 0.
+// minvals: The minimum cutoff. May be -infinity.
+// maxvals: The maximum cutoff. May be +infinity, and must be more than the minval
+// for each batch.
//
-// Returns the created operation.
-func ResourceSparseApplyAdadelta(scope *Scope, var_ tf.Output, accum tf.Output, accum_update tf.Output, lr tf.Output, rho tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyAdadeltaAttr) (o *tf.Operation) {
+// Returns A matrix of shape num_batches x samples_per_batch, filled with random
+// truncated normal values using the parameters for each row.
+func ParameterizedTruncatedNormal(scope *Scope, shape tf.Output, means tf.Output, stdevs tf.Output, minvals tf.Output, maxvals tf.Output, optional ...ParameterizedTruncatedNormalAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
@@ -23069,161 +22804,115 @@ func ResourceSparseApplyAdadelta(scope *Scope, var_ tf.Output, accum tf.Output,
a(attrs)
}
opspec := tf.OpSpec{
- Type: "ResourceSparseApplyAdadelta",
+ Type: "ParameterizedTruncatedNormal",
Input: []tf.Input{
- var_, accum, accum_update, lr, rho, epsilon, grad, indices,
+ shape, means, stdevs, minvals, maxvals,
},
Attrs: attrs,
}
- return scope.AddOperation(opspec)
-}
-
-// Returns which elements of x are NaN.
-//
-// @compatibility(numpy)
-// Equivalent to np.isnan
-// @end_compatibility
-func IsNan(scope *Scope, x tf.Output) (y tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "IsNan",
- Input: []tf.Input{
- x,
- },
- }
op := scope.AddOperation(opspec)
return op.Output(0)
}
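A minimal construction-only sketch of the batched-parameter convention described above (scalars broadcast to every batch; vectors of length shape[0] give per-batch parameters):

```go
package main

import (
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	shape := op.Const(s, []int64{3, 4}) // 3 batches, 4 samples per batch
	means := op.Const(s, float32(0))    // scalars apply to every batch
	stdevs := op.Const(s, float32(1))
	minvals := op.Const(s, float32(-2)) // truncate to [-2, 2]
	maxvals := op.Const(s, float32(2))
	samples := op.ParameterizedTruncatedNormal(s, shape, means, stdevs, minvals, maxvals,
		op.ParameterizedTruncatedNormalSeed(42))
	_ = samples
	if _, err := s.Finalize(); err != nil {
		panic(err)
	}
}
```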
-// Pads a tensor.
+// Returns a diagonal tensor with given diagonal values.
//
-// This operation pads `input` according to the `paddings` and `constant_values`
-// you specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is
-// the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
-// how many padding values to add before the contents of `input` in that dimension,
-// and `paddings[D, 1]` indicates how many padding values to add after the contents
-// of `input` in that dimension. `constant_values` is a scalar tensor of the same
-// type as `input` that indicates the value to use for padding `input`.
+// Given a `diagonal`, this operation returns a tensor with the `diagonal` and
+// everything else padded with zeros. The diagonal is computed as follows:
//
-// The padded size of each dimension D of the output is:
+// Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of
+// rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:
//
-// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
+// `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else.
//
// For example:
//
// ```
-// # 't' is [[1, 1], [2, 2]]
-// # 'paddings' is [[1, 1], [2, 2]]
-// # 'constant_values' is 0
-// # rank of 't' is 2
-// pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
-// [0, 0, 1, 1, 0, 0]
-// [0, 0, 2, 2, 0, 0]
-// [0, 0, 0, 0, 0, 0]]
+// # 'diagonal' is [1, 2, 3, 4]
+// tf.diag(diagonal) ==> [[1, 0, 0, 0]
+// [0, 2, 0, 0]
+// [0, 0, 3, 0]
+// [0, 0, 0, 4]]
// ```
-func PadV2(scope *Scope, input tf.Output, paddings tf.Output, constant_values tf.Output) (output tf.Output) {
+//
+// Arguments:
+// diagonal: Rank k tensor where k is at most 1.
+func Diag(scope *Scope, diagonal tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "PadV2",
+ Type: "Diag",
Input: []tf.Input{
- input, paddings, constant_values,
+ diagonal,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
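A one-op sketch mirroring the example in the comment above:

```go
package main

import (
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// Evaluating this yields the 4x4 matrix with 1, 2, 3, 4 on the diagonal
	// and zeros elsewhere, as in the comment above.
	output := op.Diag(s, op.Const(s, []int32{1, 2, 3, 4}))
	_ = output
	if _, err := s.Finalize(); err != nil {
		panic(err)
	}
}
```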
-// Computes gradients for the scaled exponential linear (Selu) operation.
+// Split the data from the input value into TensorArray elements.
+//
+// Assuming that `lengths` takes on values
+//
+// ```(n0, n1, ..., n(T-1))```
+//
+// and that `value` has shape
+//
+// ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```,
+//
+// this splits values into a TensorArray with T tensors.
+//
+// TensorArray index t will be the subtensor of values with starting position
+//
+// ```(n0 + n1 + ... + n(t-1), 0, 0, ...)```
+//
+// and having size
+//
+// ```nt x d0 x d1 x ...```
//
// Arguments:
-// gradients: The backpropagated gradients to the corresponding Selu operation.
-// outputs: The outputs of the corresponding Selu operation.
+// handle: The handle to a TensorArray.
+// value: The concatenated tensor to write to the TensorArray.
+// lengths: The vector of lengths, how to split the rows of value into the
+// TensorArray.
+// flow_in: A float scalar that enforces proper chaining of operations.
//
-// Returns The gradients: `gradients * (outputs + scale * alpha)`
-// if outputs < 0, `scale * gradients` otherwise.
-func SeluGrad(scope *Scope, gradients tf.Output, outputs tf.Output) (backprops tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "SeluGrad",
- Input: []tf.Input{
- gradients, outputs,
- },
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// Computes softplus: `log(exp(features) + 1)`.
-func Softplus(scope *Scope, features tf.Output) (activations tf.Output) {
+// Returns A float scalar that enforces proper chaining of operations.
+func TensorArraySplitV3(scope *Scope, handle tf.Output, value tf.Output, lengths tf.Output, flow_in tf.Output) (flow_out tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Softplus",
+ Type: "TensorArraySplitV3",
Input: []tf.Input{
- features,
+ handle, value, lengths, flow_in,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// BatchMatMulAttr is an optional argument to BatchMatMul.
-type BatchMatMulAttr func(optionalAttr)
-
-// BatchMatMulAdjX sets the optional adj_x attribute to value.
-//
-// value: If `True`, adjoint the slices of `x`. Defaults to `False`.
-// If not specified, defaults to false
-func BatchMatMulAdjX(value bool) BatchMatMulAttr {
- return func(m optionalAttr) {
- m["adj_x"] = value
- }
-}
+// SerializeSparseAttr is an optional argument to SerializeSparse.
+type SerializeSparseAttr func(optionalAttr)
-// BatchMatMulAdjY sets the optional adj_y attribute to value.
+// SerializeSparseOutType sets the optional out_type attribute to value.
//
-// value: If `True`, adjoint the slices of `y`. Defaults to `False`.
-// If not specified, defaults to false
-func BatchMatMulAdjY(value bool) BatchMatMulAttr {
+// value: The `dtype` to use for serialization; the supported types are `string`
+// (default) and `variant`.
+// If not specified, defaults to DT_STRING
+func SerializeSparseOutType(value tf.DataType) SerializeSparseAttr {
return func(m optionalAttr) {
- m["adj_y"] = value
+ m["out_type"] = value
}
}
-// Multiplies slices of two tensors in batches.
-//
-// Multiplies all slices of `Tensor` `x` and `y` (each slice can be
-// viewed as an element of a batch), and arranges the individual results
-// in a single output tensor of the same batch size. Each of the
-// individual slices can optionally be adjointed (to adjoint a matrix
-// means to transpose and conjugate it) before multiplication by setting
-// the `adj_x` or `adj_y` flag to `True`, which are by default `False`.
-//
-// The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`
-// and `[..., r_y, c_y]`.
-//
-// The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:
-//
-// r_o = c_x if adj_x else r_x
-// c_o = r_y if adj_y else c_y
-//
-// It is computed as:
-//
-// output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
+// Serialize a `SparseTensor` into a `[3]` `Tensor` object.
//
// Arguments:
-// x: 2-D or higher with shape `[..., r_x, c_x]`.
-// y: 2-D or higher with shape `[..., r_y, c_y]`.
-//
-// Returns 3-D or higher with shape `[..., r_o, c_o]`
-func BatchMatMul(scope *Scope, x tf.Output, y tf.Output, optional ...BatchMatMulAttr) (output tf.Output) {
+// sparse_indices: 2-D. The `indices` of the `SparseTensor`.
+// sparse_values: 1-D. The `values` of the `SparseTensor`.
+// sparse_shape: 1-D. The `shape` of the `SparseTensor`.
+func SerializeSparse(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output, optional ...SerializeSparseAttr) (serialized_sparse tf.Output) {
if scope.Err() != nil {
return
}
@@ -23232,9 +22921,9 @@ func BatchMatMul(scope *Scope, x tf.Output, y tf.Output, optional ...BatchMatMul
a(attrs)
}
opspec := tf.OpSpec{
- Type: "BatchMatMul",
+ Type: "SerializeSparse",
Input: []tf.Input{
- x, y,
+ sparse_indices, sparse_values, sparse_shape,
},
Attrs: attrs,
}
@@ -23242,311 +22931,327 @@ func BatchMatMul(scope *Scope, x tf.Output, y tf.Output, optional ...BatchMatMul
return op.Output(0)
}
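A construction-only sketch showing the three dense inputs that describe a `SparseTensor` here; the example values are illustrative only:

```go
package main

import (
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// A 3x4 SparseTensor with two nonzero entries.
	indices := op.Const(s, [][]int64{{0, 0}, {1, 2}})
	values := op.Const(s, []float32{1.5, 2.5})
	shape := op.Const(s, []int64{3, 4})
	// serialized is a string tensor of shape [3] holding the serialized
	// indices, values, and shape; SerializeSparseOutType switches the
	// serialization dtype.
	serialized := op.SerializeSparse(s, indices, values, shape)
	_ = serialized
	if _, err := s.Finalize(); err != nil {
		panic(err)
	}
}
```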
-// Computes softplus gradients for a softplus operation.
+// RandomShuffleQueueV2Attr is an optional argument to RandomShuffleQueueV2.
+type RandomShuffleQueueV2Attr func(optionalAttr)
+
+// RandomShuffleQueueV2Shapes sets the optional shapes attribute to value.
//
-// Arguments:
-// gradients: The backpropagated gradients to the corresponding softplus operation.
-// features: The features passed as input to the corresponding softplus operation.
+// value: The shape of each component in a value. The length of this attr must
+// be either 0 or the same as the length of component_types. If the length of
+// this attr is 0, the shapes of queue elements are not constrained, and
+// only one element may be dequeued at a time.
+// If not specified, defaults to <>
//
-// Returns The gradients: `gradients / (1 + exp(-features))`.
-func SoftplusGrad(scope *Scope, gradients tf.Output, features tf.Output) (backprops tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "SoftplusGrad",
- Input: []tf.Input{
- gradients, features,
- },
+// REQUIRES: len(value) >= 0
+func RandomShuffleQueueV2Shapes(value []tf.Shape) RandomShuffleQueueV2Attr {
+ return func(m optionalAttr) {
+ m["shapes"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// Computes softsign gradients for a softsign operation.
-//
-// Arguments:
-// gradients: The backpropagated gradients to the corresponding softsign operation.
-// features: The features passed as input to the corresponding softsign operation.
+// RandomShuffleQueueV2Capacity sets the optional capacity attribute to value.
//
-// Returns The gradients: `gradients / (1 + abs(features)) ** 2`.
-func SoftsignGrad(scope *Scope, gradients tf.Output, features tf.Output) (backprops tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "SoftsignGrad",
- Input: []tf.Input{
- gradients, features,
- },
+// value: The upper bound on the number of elements in this queue.
+// Negative numbers mean no limit.
+// If not specified, defaults to -1
+func RandomShuffleQueueV2Capacity(value int64) RandomShuffleQueueV2Attr {
+ return func(m optionalAttr) {
+ m["capacity"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// DecodeBmpAttr is an optional argument to DecodeBmp.
-type DecodeBmpAttr func(optionalAttr)
+// RandomShuffleQueueV2MinAfterDequeue sets the optional min_after_dequeue attribute to value.
+//
+// value: Dequeue will block unless there would be this
+// many elements after the dequeue or the queue is closed. This
+// ensures a minimum level of mixing of elements.
+// If not specified, defaults to 0
+func RandomShuffleQueueV2MinAfterDequeue(value int64) RandomShuffleQueueV2Attr {
+ return func(m optionalAttr) {
+ m["min_after_dequeue"] = value
+ }
+}
-// DecodeBmpChannels sets the optional channels attribute to value.
+// RandomShuffleQueueV2Seed sets the optional seed attribute to value.
+//
+// value: If either seed or seed2 is set to be non-zero, the random number
+// generator is seeded by the given seed. Otherwise, a random seed is used.
// If not specified, defaults to 0
-func DecodeBmpChannels(value int64) DecodeBmpAttr {
+func RandomShuffleQueueV2Seed(value int64) RandomShuffleQueueV2Attr {
return func(m optionalAttr) {
- m["channels"] = value
+ m["seed"] = value
}
}
-// Decode the first frame of a BMP-encoded image to a uint8 tensor.
+// RandomShuffleQueueV2Seed2 sets the optional seed2 attribute to value.
//
-// The attr `channels` indicates the desired number of color channels for the
-// decoded image.
+// value: A second seed to avoid seed collision.
+// If not specified, defaults to 0
+func RandomShuffleQueueV2Seed2(value int64) RandomShuffleQueueV2Attr {
+ return func(m optionalAttr) {
+ m["seed2"] = value
+ }
+}
+
+// RandomShuffleQueueV2Container sets the optional container attribute to value.
//
-// Accepted values are:
+// value: If non-empty, this queue is placed in the given container.
+// Otherwise, a default container is used.
+// If not specified, defaults to ""
+func RandomShuffleQueueV2Container(value string) RandomShuffleQueueV2Attr {
+ return func(m optionalAttr) {
+ m["container"] = value
+ }
+}
+
+// RandomShuffleQueueV2SharedName sets the optional shared_name attribute to value.
//
-// * 0: Use the number of channels in the BMP-encoded image.
-// * 3: output an RGB image.
-// * 4: output an RGBA image.
+// value: If non-empty, this queue will be shared under the given name
+// across multiple sessions.
+// If not specified, defaults to ""
+func RandomShuffleQueueV2SharedName(value string) RandomShuffleQueueV2Attr {
+ return func(m optionalAttr) {
+ m["shared_name"] = value
+ }
+}
+
+// A queue that randomizes the order of elements.
//
// Arguments:
-// contents: 0-D. The BMP-encoded image.
+// component_types: The type of each component in a value.
//
-// Returns 3-D with shape `[height, width, channels]`. RGB order
-func DecodeBmp(scope *Scope, contents tf.Output, optional ...DecodeBmpAttr) (image tf.Output) {
+// Returns The handle to the queue.
+func RandomShuffleQueueV2(scope *Scope, component_types []tf.DataType, optional ...RandomShuffleQueueV2Attr) (handle tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"component_types": component_types}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "DecodeBmp",
- Input: []tf.Input{
- contents,
- },
+ Type: "RandomShuffleQueueV2",
+
Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
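A runnable sketch that shows the shuffling behavior end to end; `QueueEnqueueManyV2` and `QueueDequeueV2` are assumed from elsewhere in this package:

```go
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	queue := op.RandomShuffleQueueV2(s, []tf.DataType{tf.Int32},
		op.RandomShuffleQueueV2Capacity(16),
		op.RandomShuffleQueueV2MinAfterDequeue(0),
		op.RandomShuffleQueueV2Seed(7))
	enqueue := op.QueueEnqueueManyV2(s, queue, []tf.Output{op.Const(s, []int32{1, 2, 3, 4})})
	dequeued := op.QueueDequeueV2(s, queue, []tf.DataType{tf.Int32})

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()

	if _, err := sess.Run(nil, nil, []*tf.Operation{enqueue}); err != nil {
		panic(err)
	}
	for i := 0; i < 4; i++ {
		out, err := sess.Run(nil, dequeued, nil)
		if err != nil {
			panic(err)
		}
		fmt.Println(out[0].Value()) // the four elements, in a shuffled order
	}
}
```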
-// Computes softmax activations.
+// Draw bounding boxes on a batch of images.
//
-// For each batch `i` and class `j` we have
+// Outputs a copy of `images` but draws on top of the pixels zero or more bounding
+// boxes specified by the locations in `boxes`. The coordinates of each
+// bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The
+// bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
+// height of the underlying image.
//
-// softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))
+// For example, if an image is 100 x 200 pixels (height x width) and the bounding
+// box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of
+// the bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates).
+//
+// Parts of the bounding box may fall outside the image.
//
// Arguments:
-// logits: 2-D with shape `[batch_size, num_classes]`.
+// images: 4-D with shape `[batch, height, width, depth]`. A batch of images.
+// boxes: 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding
+// boxes.
//
-// Returns Same shape as `logits`.
-func Softmax(scope *Scope, logits tf.Output) (softmax tf.Output) {
+// Returns 4-D with the same shape as `images`. The batch of input images with
+// bounding boxes drawn on the images.
+func DrawBoundingBoxes(scope *Scope, images tf.Output, boxes tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Softmax",
+ Type: "DrawBoundingBoxes",
Input: []tf.Input{
- logits,
+ images, boxes,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
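A construction-only sketch of the expected input shapes, using the `Fill` wrapper (defined elsewhere in this package) to build a uniform image batch:

```go
package main

import (
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// A 1x4x4x3 mid-gray image batch.
	images := op.Fill(s, op.Const(s, []int32{1, 4, 4, 3}), op.Const(s, float32(0.5)))
	// One box per image: [y_min, x_min, y_max, x_max] in relative coordinates.
	boxes := op.Const(s, [][][]float32{{{0.1, 0.2, 0.5, 0.9}}})
	drawn := op.DrawBoundingBoxes(s, images, boxes)
	_ = drawn
	if _, err := s.Finalize(); err != nil {
		panic(err)
	}
}
```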
-// Returns the truth value of (x <= y) element-wise.
+// LearnedUnigramCandidateSamplerAttr is an optional argument to LearnedUnigramCandidateSampler.
+type LearnedUnigramCandidateSamplerAttr func(optionalAttr)
+
+// LearnedUnigramCandidateSamplerSeed sets the optional seed attribute to value.
//
-// *NOTE*: `LessEqual` supports broadcasting. More about broadcasting
-// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-func LessEqual(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
- if scope.Err() != nil {
- return
+// value: If either seed or seed2 is set to be non-zero, the random number
+// generator is seeded by the given seed. Otherwise, it is seeded by a
+// random seed.
+// If not specified, defaults to 0
+func LearnedUnigramCandidateSamplerSeed(value int64) LearnedUnigramCandidateSamplerAttr {
+ return func(m optionalAttr) {
+ m["seed"] = value
}
- opspec := tf.OpSpec{
- Type: "LessEqual",
- Input: []tf.Input{
- x, y,
- },
+}
+
+// LearnedUnigramCandidateSamplerSeed2 sets the optional seed2 attribute to value.
+//
+// value: A second seed to avoid seed collision.
+// If not specified, defaults to 0
+func LearnedUnigramCandidateSamplerSeed2(value int64) LearnedUnigramCandidateSamplerAttr {
+ return func(m optionalAttr) {
+ m["seed2"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// Computes log softmax activations.
+// Generates labels for candidate sampling with a learned unigram distribution.
//
-// For each batch `i` and class `j` we have
+// See explanations of candidate sampling and the data formats at
+// go/candidate-sampling.
//
-// logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))
+// For each batch, this op picks a single set of sampled candidate labels.
+//
+// The advantages of sampling candidates per-batch are simplicity and the
+// possibility of efficient dense matrix multiplication. The disadvantage is that
+// the sampled candidates must be chosen independently of the context and of the
+// true labels.
//
// Arguments:
-// logits: 2-D with shape `[batch_size, num_classes]`.
+// true_classes: A batch_size * num_true matrix, in which each row contains the
+// IDs of the num_true target_classes in the corresponding original label.
+// num_true: Number of true labels per context.
+// num_sampled: Number of candidates to randomly sample.
+// unique: If unique is true, we sample with rejection, so that all sampled
+// candidates in a batch are unique. This requires some approximation to
+// estimate the post-rejection sampling probabilities.
+// range_max: The sampler will sample integers from the interval [0, range_max).
//
-// Returns Same shape as `logits`.
-func LogSoftmax(scope *Scope, logits tf.Output) (logsoftmax tf.Output) {
+// Returns A vector of length num_sampled, in which each element is
+// the ID of a sampled candidate; a batch_size * num_true matrix, representing
+// the number of times each candidate is expected to occur in a batch
+// of sampled candidates (if unique=true, then this is a probability); and a
+// vector of length num_sampled, for each sampled candidate representing the
+// number of times the candidate is expected to occur in a batch of sampled
+// candidates (if unique=true, then this is a probability).
+func LearnedUnigramCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...LearnedUnigramCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique, "range_max": range_max}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "LogSoftmax",
+ Type: "LearnedUnigramCandidateSampler",
Input: []tf.Input{
- logits,
+ true_classes,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1), op.Output(2)
}
-// Given a quantized tensor described by (input, input_min, input_max), outputs a
-//
-// range that covers the actual values present in that tensor. This op is
-// typically used to produce the requested_output_min and requested_output_max for
-// Requantize.
+// Computes gradients for the scaled exponential linear (Selu) operation.
//
// Arguments:
+// gradients: The backpropagated gradients to the corresponding Selu operation.
+// outputs: The outputs of the corresponding Selu operation.
//
-// input_min: The float value that the minimum quantized input value represents.
-// input_max: The float value that the maximum quantized input value represents.
-//
-// Returns The computed min output.the computed max output.
-func RequantizationRange(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output) (output_min tf.Output, output_max tf.Output) {
+// Returns The gradients: `gradients * (outputs + scale * alpha)`
+// if outputs < 0, `scale * gradients` otherwise.
+func SeluGrad(scope *Scope, gradients tf.Output, outputs tf.Output) (backprops tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "RequantizationRange",
+ Type: "SeluGrad",
Input: []tf.Input{
- input, input_min, input_max,
+ gradients, outputs,
},
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1)
+ return op.Output(0)
}
-// Says whether the targets are in the top `K` predictions.
-//
-// This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the
-// prediction for the target class is among the top `k` predictions among
-// all predictions for example `i`. Note that the behavior of `InTopK` differs
-// from the `TopK` op in its handling of ties; if multiple classes have the
-// same prediction value and straddle the top-`k` boundary, all of those
-// classes are considered to be in the top `k`.
-//
-// More formally, let
-//
-// \\(predictions_i\\) be the predictions for all classes for example `i`,
-// \\(targets_i\\) be the target class for example `i`,
-// \\(out_i\\) be the output for example `i`,
-//
-// $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
+// Get the current size of the TensorArray.
//
// Arguments:
-// predictions: A `batch_size` x `classes` tensor.
-// targets: A `batch_size` vector of class ids.
-// k: Number of top elements to look at for computing precision.
+// handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
+// flow_in: A float scalar that enforces proper chaining of operations.
//
-// Returns Computed Precision at `k` as a `bool Tensor`.
-func InTopK(scope *Scope, predictions tf.Output, targets tf.Output, k int64) (precision tf.Output) {
+// Returns The current size of the TensorArray.
+func TensorArraySizeV3(scope *Scope, handle tf.Output, flow_in tf.Output) (size tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"k": k}
opspec := tf.OpSpec{
- Type: "InTopK",
+ Type: "TensorArraySizeV3",
Input: []tf.Input{
- predictions, targets,
+ handle, flow_in,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Returns a batched diagonal tensor with a given batched diagonal values.
-//
-// Given a `diagonal`, this operation returns a tensor with the `diagonal` and
-// everything else padded with zeros. The diagonal is computed as follows:
-//
-// Assume `diagonal` has `k` dimensions `[I, J, K, ..., N]`, then the output is a
-// tensor of rank `k+1` with dimensions [I, J, K, ..., N, N]` where:
-//
-// `output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`.
-//
-// For example:
-//
-// ```
-// # 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]]
-//
-// and diagonal.shape = (2, 4)
-//
-// tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0]
-// [0, 2, 0, 0]
-// [0, 0, 3, 0]
-// [0, 0, 0, 4]],
-// [[5, 0, 0, 0]
-// [0, 6, 0, 0]
-// [0, 0, 7, 0]
-// [0, 0, 0, 8]]]
-//
-// which has shape (2, 4, 4)
-// ```
-//
-// Arguments:
-// diagonal: Rank `k`, where `k >= 1`.
-//
-// Returns Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`.
-func MatrixDiag(scope *Scope, diagonal tf.Output) (output tf.Output) {
+// Deprecated. Use TensorArrayWriteV3
+func TensorArrayWriteV2(scope *Scope, handle tf.Output, index tf.Output, value tf.Output, flow_in tf.Output) (flow_out tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "MatrixDiag",
+ Type: "TensorArrayWriteV2",
Input: []tf.Input{
- diagonal,
+ handle, index, value, flow_in,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// MaxPool3DAttr is an optional argument to MaxPool3D.
-type MaxPool3DAttr func(optionalAttr)
+// SparseReduceMaxAttr is an optional argument to SparseReduceMax.
+type SparseReduceMaxAttr func(optionalAttr)
-// MaxPool3DDataFormat sets the optional data_format attribute to value.
+// SparseReduceMaxKeepDims sets the optional keep_dims attribute to value.
//
-// value: The data format of the input and output data. With the
-// default format "NDHWC", the data is stored in the order of:
-// [batch, in_depth, in_height, in_width, in_channels].
-// Alternatively, the format could be "NCDHW", the data storage order is:
-// [batch, in_channels, in_depth, in_height, in_width].
-// If not specified, defaults to "NDHWC"
-func MaxPool3DDataFormat(value string) MaxPool3DAttr {
+// value: If true, retain reduced dimensions with length 1.
+// If not specified, defaults to false
+func SparseReduceMaxKeepDims(value bool) SparseReduceMaxAttr {
return func(m optionalAttr) {
- m["data_format"] = value
+ m["keep_dims"] = value
}
}
-// Performs 3D max pooling on the input.
+// Computes the max of elements across dimensions of a SparseTensor.
+//
+// This Op takes a SparseTensor and is the sparse counterpart to
+// `tf.reduce_max()`. In particular, this Op also returns a dense `Tensor`
+// instead of a sparse one.
+//
+// Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
+// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+// `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
+// with length 1.
+//
+// If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
+// with a single element is returned. Additionally, the axes can be negative,
+// which are interpreted according to the indexing rules in Python.
//
// Arguments:
-// input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.
-// ksize: 1-D tensor of length 5. The size of the window for each dimension of
-// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
-// strides: 1-D tensor of length 5. The stride of the sliding window for each
-// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
-// padding: The type of padding algorithm to use.
+// input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a
+// SparseTensor, possibly not in canonical ordering.
+// input_values: 1-D. `N` non-empty values corresponding to `input_indices`.
+// input_shape: 1-D. Shape of the input SparseTensor.
+// reduction_axes: 1-D. Length-`K` vector containing the reduction axes.
//
-// Returns The max pooled output tensor.
-func MaxPool3D(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPool3DAttr) (output tf.Output) {
+// Returns `R-K`-D. The reduced Tensor.
+func SparseReduceMax(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output, reduction_axes tf.Output, optional ...SparseReduceMaxAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
+ attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "MaxPool3D",
+ Type: "SparseReduceMax",
Input: []tf.Input{
- input,
+ input_indices, input_values, input_shape, reduction_axes,
},
Attrs: attrs,
}
@@ -23554,209 +23259,179 @@ func MaxPool3D(scope *Scope, input tf.Output, ksize []int64, strides []int64, pa
return op.Output(0)
}
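+
+// An illustrative sketch (assumed names, not generated code): given the
+// component tensors of a SparseTensor and a vector of reduction axes, keeping
+// reduced dimensions:
+//
+//	dense := op.SparseReduceMax(root, spIndices, spValues, spShape, axes,
+//		op.SparseReduceMaxKeepDims(true))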
-// Returns x // y element-wise.
+// AsStringAttr is an optional argument to AsString.
+type AsStringAttr func(optionalAttr)
+
+// AsStringPrecision sets the optional precision attribute to value.
//
-// *NOTE*: `FloorDiv` supports broadcasting. More about broadcasting
-// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-func FloorDiv(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "FloorDiv",
- Input: []tf.Input{
- x, y,
- },
+// value: The post-decimal precision to use for floating point numbers.
+// Only used if precision > -1.
+// If not specified, defaults to -1
+func AsStringPrecision(value int64) AsStringAttr {
+ return func(m optionalAttr) {
+ m["precision"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// TopKAttr is an optional argument to TopK.
-type TopKAttr func(optionalAttr)
-
-// TopKSorted sets the optional sorted attribute to value.
+// AsStringScientific sets the optional scientific attribute to value.
//
-// value: If true the resulting `k` elements will be sorted by the values in
-// descending order.
-// If not specified, defaults to true
-func TopKSorted(value bool) TopKAttr {
+// value: Use scientific notation for floating point numbers.
+// If not specified, defaults to false
+func AsStringScientific(value bool) AsStringAttr {
return func(m optionalAttr) {
- m["sorted"] = value
+ m["scientific"] = value
}
}
-// Finds values and indices of the `k` largest elements for the last dimension.
-//
-// DEPRECATED at GraphDef version 7: Use TopKV2 instead
-//
-// If the input is a vector (rank-1), finds the `k` largest entries in the vector
-// and outputs their values and indices as vectors. Thus `values[j]` is the
-// `j`-th largest entry in `input`, and its index is `indices[j]`.
-//
-// For matrices (resp. higher rank input), computes the top `k` entries in each
-// row (resp. vector along the last dimension). Thus,
-//
-// values.shape = indices.shape = input.shape[:-1] + [k]
+// AsStringShortest sets the optional shortest attribute to value.
//
-// If two elements are equal, the lower-index element appears first.
+// value: Use shortest representation (either scientific or standard) for
+// floating point numbers.
+// If not specified, defaults to false
+func AsStringShortest(value bool) AsStringAttr {
+ return func(m optionalAttr) {
+ m["shortest"] = value
+ }
+}
+
+// AsStringWidth sets the optional width attribute to value.
//
-// If `k` varies dynamically, use `TopKV2` below.
+// value: Pad pre-decimal numbers to this width.
+// Applies to both floating point and integer numbers.
+// Only used if width > -1.
+// If not specified, defaults to -1
+func AsStringWidth(value int64) AsStringAttr {
+ return func(m optionalAttr) {
+ m["width"] = value
+ }
+}
+
+// AsStringFill sets the optional fill attribute to value.
//
-// Arguments:
-// input: 1-D or higher with last dimension at least `k`.
-// k: Number of top elements to look for along the last dimension (along each
-// row for matrices).
+// value: The value to pad if width > -1. If empty, pads with spaces.
+// Another typical value is '0'. String cannot be longer than 1 character.
+// If not specified, defaults to ""
+func AsStringFill(value string) AsStringAttr {
+ return func(m optionalAttr) {
+ m["fill"] = value
+ }
+}
+
+// Converts each entry in the given tensor to strings. Supports many numeric types and boolean.
//
-// Returns The `k` largest elements along each last dimensional slice.The indices of `values` within the last dimension of `input`.
-func TopK(scope *Scope, input tf.Output, k int64, optional ...TopKAttr) (values tf.Output, indices tf.Output) {
+func AsString(scope *Scope, input tf.Output, optional ...AsStringAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"k": k}
+ attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "TopK",
+ Type: "AsString",
Input: []tf.Input{
input,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1)
-}
-
-// TopKV2Attr is an optional argument to TopKV2.
-type TopKV2Attr func(optionalAttr)
-
-// TopKV2Sorted sets the optional sorted attribute to value.
-//
-// value: If true the resulting `k` elements will be sorted by the values in
-// descending order.
-// If not specified, defaults to true
-func TopKV2Sorted(value bool) TopKV2Attr {
- return func(m optionalAttr) {
- m["sorted"] = value
- }
+ return op.Output(0)
}
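+
+// An illustrative sketch (assumed names): formatting a float tensor `x` with
+// two post-decimal digits, padded to width 8 with leading zeros:
+//
+//	s := op.AsString(root, x, op.AsStringPrecision(2),
+//		op.AsStringWidth(8), op.AsStringFill("0"))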
-// Finds values and indices of the `k` largest elements for the last dimension.
-//
-// If the input is a vector (rank-1), finds the `k` largest entries in the vector
-// and outputs their values and indices as vectors. Thus `values[j]` is the
-// `j`-th largest entry in `input`, and its index is `indices[j]`.
-//
-// For matrices (resp. higher rank input), computes the top `k` entries in each
-// row (resp. vector along the last dimension). Thus,
-//
-// values.shape = indices.shape = input.shape[:-1] + [k]
-//
-// If two elements are equal, the lower-index element appears first.
-//
-// Arguments:
-// input: 1-D or higher with last dimension at least `k`.
-// k: 0-D. Number of top elements to look for along the last dimension (along each
-// row for matrices).
-//
-// Returns The `k` largest elements along each last dimensional slice.The indices of `values` within the last dimension of `input`.
-func TopKV2(scope *Scope, input tf.Output, k tf.Output, optional ...TopKV2Attr) (values tf.Output, indices tf.Output) {
+// Deprecated. Use TensorArrayScatterV3
+func TensorArrayScatterV2(scope *Scope, handle tf.Output, indices tf.Output, value tf.Output, flow_in tf.Output) (flow_out tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "TopKV2",
+ Type: "TensorArrayScatterV2",
Input: []tf.Input{
- input, k,
+ handle, indices, value, flow_in,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1)
+ return op.Output(0)
}
-// RandomCropAttr is an optional argument to RandomCrop.
-type RandomCropAttr func(optionalAttr)
-
-// RandomCropSeed sets the optional seed attribute to value.
+// Applies sparse addition to `input` using individual values or slices.
//
-// value: If either seed or seed2 are set to be non-zero, the random number
-// generator is seeded by the given seed. Otherwise, it is seeded by a
-// random seed.
-// If not specified, defaults to 0
-func RandomCropSeed(value int64) RandomCropAttr {
- return func(m optionalAttr) {
- m["seed"] = value
- }
-}
-
-// RandomCropSeed2 sets the optional seed2 attribute to value.
+// The values or slices are drawn from `updates` according to `indices`. The
+// updates are non-aliasing: `input` is only modified in-place if no other
+// operations will use it. Otherwise, a copy of `input` is made. This operation
+// has a gradient with respect to both `input` and `updates`.
//
-// value: An second seed to avoid seed collision.
-// If not specified, defaults to 0
-func RandomCropSeed2(value int64) RandomCropAttr {
- return func(m optionalAttr) {
- m["seed2"] = value
- }
-}
-
-// Randomly crop `image`.
+// `input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
//
-// DEPRECATED at GraphDef version 8: Random crop is now pure Python
+// `indices` must be an integer tensor containing indices into `input`.
+// It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
//
-// `size` is a 1-D int64 tensor with 2 elements representing the crop height and
-// width. The values must be non negative.
+// The innermost dimension of `indices` (with length `K`) corresponds to
+// indices into elements (if `K = P`) or `(P-K)`-dimensional slices
+// (if `K < P`) along the `K`th dimension of `input`.
//
-// This Op picks a random location in `image` and crops a `height` by `width`
-// rectangle from that location. The random location is picked so the cropped
-// area will fit inside the original image.
+// `updates` is a `Tensor` of rank `Q-1+P-K` with shape:
+//
+// ```
+// [d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].
+// ```
+//
+// For example, say we want to add 4 scattered elements to a rank-1 tensor with
+// 8 elements. In Python, that addition would look like this:
+//
+// input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8])
+// indices = tf.constant([[4], [3], [1], [7]])
+// updates = tf.constant([9, 10, 11, 12])
+// output = tf.scatter_nd_non_aliasing_add(input, indices, updates)
+// with tf.Session() as sess:
+// print(sess.run(output))
+//
+// The resulting value `output` would look like this:
+//
+// [1, 13, 3, 14, 14, 6, 7, 20]
+//
+// See `tf.scatter_nd` for more details about how to make updates to slices.
//
// Arguments:
-// image: 3-D of shape `[height, width, channels]`.
-// size: 1-D of length 2 containing: `crop_height`, `crop_width`..
+// input: A Tensor.
+// indices: A Tensor. Must be one of the following types: `int32`, `int64`.
+// A tensor of indices into `input`.
+// updates: A Tensor. Must have the same type as `input`. A tensor of updated
+// values to add to `input`.
//
-// Returns 3-D of shape `[crop_height, crop_width, channels].`
-func RandomCrop(scope *Scope, image tf.Output, size tf.Output, optional ...RandomCropAttr) (output tf.Output) {
+// Returns A `Tensor` with the same shape as `input`, containing values of `input`
+// updated with `updates`.
+func ScatterNdNonAliasingAdd(scope *Scope, input tf.Output, indices tf.Output, updates tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "RandomCrop",
+ Type: "ScatterNdNonAliasingAdd",
Input: []tf.Input{
- image, size,
+ input, indices, updates,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
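+
+// The Go equivalent of the Python example above is a sketch along these lines
+// (assuming `input`, `indices`, and `updates` hold the constants shown there):
+//
+//	out := op.ScatterNdNonAliasingAdd(root, input, indices, updates)
+//
+// Evaluating `out` would likewise yield [1, 13, 3, 14, 14, 6, 7, 20].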
-// FractionalAvgPoolAttr is an optional argument to FractionalAvgPool.
-type FractionalAvgPoolAttr func(optionalAttr)
+// FractionalMaxPoolAttr is an optional argument to FractionalMaxPool.
+type FractionalMaxPoolAttr func(optionalAttr)
-// FractionalAvgPoolPseudoRandom sets the optional pseudo_random attribute to value.
+// FractionalMaxPoolPseudoRandom sets the optional pseudo_random attribute to value.
//
// value: When set to True, generates the pooling sequence in a
// pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin
// Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for the
// difference between pseudorandom and random.
// If not specified, defaults to false
-func FractionalAvgPoolPseudoRandom(value bool) FractionalAvgPoolAttr {
+func FractionalMaxPoolPseudoRandom(value bool) FractionalMaxPoolAttr {
return func(m optionalAttr) {
m["pseudo_random"] = value
}
}
-// FractionalAvgPoolOverlapping sets the optional overlapping attribute to value.
+// FractionalMaxPoolOverlapping sets the optional overlapping attribute to value.
//
// value: When set to True, it means when pooling, the values at the boundary
// of adjacent pooling cells are used by both cells. For example:
@@ -23766,54 +23441,78 @@ func FractionalAvgPoolPseudoRandom(value bool) FractionalAvgPoolAttr {
// `value 20 5 16 3 7`
//
// If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.
-// The result would be [41/3, 26/3] for fractional avg pooling.
+// The result would be [20, 16] for fractional max pooling.
// If not specified, defaults to false
-func FractionalAvgPoolOverlapping(value bool) FractionalAvgPoolAttr {
+func FractionalMaxPoolOverlapping(value bool) FractionalMaxPoolAttr {
return func(m optionalAttr) {
m["overlapping"] = value
}
}
-// FractionalAvgPoolDeterministic sets the optional deterministic attribute to value.
+// FractionalMaxPoolDeterministic sets the optional deterministic attribute to value.
//
// value: When set to True, a fixed pooling region will be used when
-// iterating over a FractionalAvgPool node in the computation graph. Mainly used
-// in unit test to make FractionalAvgPool deterministic.
+// iterating over a FractionalMaxPool node in the computation graph. Mainly used
+// in unit test to make FractionalMaxPool deterministic.
// If not specified, defaults to false
-func FractionalAvgPoolDeterministic(value bool) FractionalAvgPoolAttr {
+func FractionalMaxPoolDeterministic(value bool) FractionalMaxPoolAttr {
return func(m optionalAttr) {
m["deterministic"] = value
}
}
-// FractionalAvgPoolSeed sets the optional seed attribute to value.
+// FractionalMaxPoolSeed sets the optional seed attribute to value.
//
// value: If either seed or seed2 is set to be non-zero, the random number
// generator is seeded by the given seed. Otherwise, it is seeded by a
// random seed.
// If not specified, defaults to 0
-func FractionalAvgPoolSeed(value int64) FractionalAvgPoolAttr {
+func FractionalMaxPoolSeed(value int64) FractionalMaxPoolAttr {
return func(m optionalAttr) {
m["seed"] = value
}
}
-// FractionalAvgPoolSeed2 sets the optional seed2 attribute to value.
+// FractionalMaxPoolSeed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
-func FractionalAvgPoolSeed2(value int64) FractionalAvgPoolAttr {
+func FractionalMaxPoolSeed2(value int64) FractionalMaxPoolAttr {
return func(m optionalAttr) {
m["seed2"] = value
}
}
-// Performs fractional average pooling on the input.
+// Performs fractional max pooling on the input.
//
-// Fractional average pooling is similar to Fractional max pooling in the pooling
-// region generation step. The only difference is that after pooling regions are
-// generated, a mean operation is performed instead of a max operation in each
-// pooling region.
+// Fractional max pooling is slightly different from regular max pooling. In
+// regular max pooling, you downsize an input set by taking the maximum value of
+// smaller N x N subsections of the set (often 2x2), and try to reduce the set by
+// a factor of N, where N is an integer. Fractional max pooling, as you might
+// expect from the word "fractional", means that the overall reduction ratio N
+// does not have to be an integer.
+//
+// The sizes of the pooling regions are generated randomly but are fairly uniform.
+// For example, let's look at the height dimension, and the constraints on the
+// list of rows that will be pool boundaries.
+//
+// First we define the following:
+//
+// 1. input_row_length : the number of rows from the input set
+// 2. output_row_length : which will be smaller than the input
+// 3. alpha = input_row_length / output_row_length : our reduction ratio
+// 4. K = floor(alpha)
+// 5. row_pooling_sequence : this is the result list of pool boundary rows
+//
+// Then, row_pooling_sequence should satisfy:
+//
+// 1. a[0] = 0 : the first value of the sequence is 0
+// 2. a[end] = input_row_length : the last value of the sequence is the size
+// 3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
+// 4. length(row_pooling_sequence) = output_row_length+1
+//
+// For more details on fractional max pooling, see this paper:
+// [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071)
//
// Arguments:
// value: 4-D with shape `[batch, height, width, channels]`.
@@ -23824,8 +23523,8 @@ func FractionalAvgPoolSeed2(value int64) FractionalAvgPoolAttr {
// dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions
// respectively.
//
-// Returns output tensor after fractional avg pooling.row pooling sequence, needed to calculate gradient.column pooling sequence, needed to calculate gradient.
-func FractionalAvgPool(scope *Scope, value tf.Output, pooling_ratio []float32, optional ...FractionalAvgPoolAttr) (output tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output) {
+// Returns the output tensor after fractional max pooling; the row pooling
+// sequence, needed to calculate gradient; and the column pooling sequence,
+// needed to calculate gradient.
+func FractionalMaxPool(scope *Scope, value tf.Output, pooling_ratio []float32, optional ...FractionalMaxPoolAttr) (output tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output) {
if scope.Err() != nil {
return
}
@@ -23834,7 +23533,7 @@ func FractionalAvgPool(scope *Scope, value tf.Output, pooling_ratio []float32, o
a(attrs)
}
opspec := tf.OpSpec{
- Type: "FractionalAvgPool",
+ Type: "FractionalMaxPool",
Input: []tf.Input{
value,
},
@@ -23844,171 +23543,108 @@ func FractionalAvgPool(scope *Scope, value tf.Output, pooling_ratio []float32, o
return op.Output(0), op.Output(1), op.Output(2)
}
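+
+// A worked instance of the constraints above (editor's example): with
+// input_row_length = 10 and output_row_length = 4, alpha = 2.5 and K = 2, so
+// row_pooling_sequence = [0, 2, 5, 7, 10] is valid: it starts at 0, ends at
+// 10, every interval is 2 or 3, and its length is 4 + 1 = 5.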
-// Updates the table to associates keys with values.
-//
-// The tensor `keys` must be of the same type as the keys of the table.
-// The tensor `values` must be of the type of the table values.
-//
-// Arguments:
-// table_handle: Handle to the table.
-// keys: Any shape. Keys to look up.
-// values: Values to associate with keys.
-//
-// Returns the created operation.
-func LookupTableInsertV2(scope *Scope, table_handle tf.Output, keys tf.Output, values tf.Output) (o *tf.Operation) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "LookupTableInsertV2",
- Input: []tf.Input{
- table_handle, keys, values,
- },
- }
- return scope.AddOperation(opspec)
-}
-
-// Produces the average pool of the input tensor for quantized types.
-//
-// Arguments:
-// input: 4-D with shape `[batch, height, width, channels]`.
-// min_input: The float value that the lowest quantized input value represents.
-// max_input: The float value that the highest quantized input value represents.
-// ksize: The size of the window for each dimension of the input tensor.
-// The length must be 4 to match the number of dimensions of the input.
-// strides: The stride of the sliding window for each dimension of the input
-// tensor. The length must be 4 to match the number of dimensions of the input.
-// padding: The type of padding algorithm to use.
-//
-// Returns The float value that the lowest quantized output value represents.The float value that the highest quantized output value represents.
-func QuantizedAvgPool(scope *Scope, input tf.Output, min_input tf.Output, max_input tf.Output, ksize []int64, strides []int64, padding string) (output tf.Output, min_output tf.Output, max_output tf.Output) {
+// Deprecated. Use TensorArraySizeV3
+func TensorArraySizeV2(scope *Scope, handle tf.Output, flow_in tf.Output) (size tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
opspec := tf.OpSpec{
- Type: "QuantizedAvgPool",
+ Type: "TensorArraySizeV2",
Input: []tf.Input{
- input, min_input, max_input,
+ handle, flow_in,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
+ return op.Output(0)
}
-// Adds Tensor 'bias' to Tensor 'input' for Quantized types.
-//
-// Broadcasts the values of bias on dimensions 0..N-2 of 'input'.
-//
-// Arguments:
-//
-// bias: A 1D bias Tensor with size matching the last dimension of 'input'.
-// min_input: The float value that the lowest quantized input value represents.
-// max_input: The float value that the highest quantized input value represents.
-// min_bias: The float value that the lowest quantized bias value represents.
-// max_bias: The float value that the highest quantized bias value represents.
-//
-//
-// Returns The float value that the lowest quantized output value represents.The float value that the highest quantized output value represents.
-func QuantizedBiasAdd(scope *Scope, input tf.Output, bias tf.Output, min_input tf.Output, max_input tf.Output, min_bias tf.Output, max_bias tf.Output, out_type tf.DataType) (output tf.Output, min_out tf.Output, max_out tf.Output) {
- if scope.Err() != nil {
- return
- }
- attrs := map[string]interface{}{"out_type": out_type}
- opspec := tf.OpSpec{
- Type: "QuantizedBiasAdd",
- Input: []tf.Input{
- input, bias, min_input, max_input, min_bias, max_bias,
- },
- Attrs: attrs,
+// Conv2DAttr is an optional argument to Conv2D.
+type Conv2DAttr func(optionalAttr)
+
+// Conv2DUseCudnnOnGpu sets the optional use_cudnn_on_gpu attribute to value.
+// If not specified, defaults to true
+func Conv2DUseCudnnOnGpu(value bool) Conv2DAttr {
+ return func(m optionalAttr) {
+ m["use_cudnn_on_gpu"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
}
-// Creates summary database writer accessible by given resource handle.
-//
-// This can be used to write tensors from the execution graph directly
-// to a database. Only SQLite is supported right now. This function
-// will create the schema if it doesn't exist. Entries in the Users,
-// Experiments, and Runs tables will be created automatically if they
-// don't already exist.
-//
-// Arguments:
-// writer: Handle to SummaryWriter resource to overwrite.
-// db_uri: For example "file:/tmp/foo.sqlite".
-// experiment_name: Can't contain ASCII control characters or <>. Case
-// sensitive. If empty, then the Run will not be associated with any
-// Experiment.
-// run_name: Can't contain ASCII control characters or <>. Case sensitive.
-// If empty, then each Tag will not be associated with any Run.
-// user_name: Must be valid as both a DNS label and Linux username. If
-// empty, then the Experiment will not be associated with any User.
+// Conv2DDataFormat sets the optional data_format attribute to value.
//
-// Returns the created operation.
-func CreateSummaryDbWriter(scope *Scope, writer tf.Output, db_uri tf.Output, experiment_name tf.Output, run_name tf.Output, user_name tf.Output) (o *tf.Operation) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "CreateSummaryDbWriter",
- Input: []tf.Input{
- writer, db_uri, experiment_name, run_name, user_name,
- },
+// value: Specify the data format of the input and output data. With the
+// default format "NHWC", the data is stored in the order of:
+// [batch, height, width, channels].
+// Alternatively, the format could be "NCHW", the data storage order of:
+// [batch, channels, height, width].
+// If not specified, defaults to "NHWC"
+func Conv2DDataFormat(value string) Conv2DAttr {
+ return func(m optionalAttr) {
+ m["data_format"] = value
}
- return scope.AddOperation(opspec)
}
-// HistogramFixedWidthAttr is an optional argument to HistogramFixedWidth.
-type HistogramFixedWidthAttr func(optionalAttr)
-
-// HistogramFixedWidthDtype sets the optional dtype attribute to value.
-// If not specified, defaults to DT_INT32
-func HistogramFixedWidthDtype(value tf.DataType) HistogramFixedWidthAttr {
+// Conv2DDilations sets the optional dilations attribute to value.
+//
+// value: 1-D tensor of length 4. The dilation factor for each dimension of
+// `input`. If set to k > 1, there will be k-1 skipped cells between each
+// filter element on that dimension. The dimension order is determined by the
+// value of `data_format`, see above for details. Dilations in the batch and
+// depth dimensions must be 1.
+// If not specified, defaults to [1, 1, 1, 1]
+func Conv2DDilations(value []int64) Conv2DAttr {
return func(m optionalAttr) {
- m["dtype"] = value
+ m["dilations"] = value
}
}
-// Return histogram of values.
+// Computes a 2-D convolution given 4-D `input` and `filter` tensors.
//
-// Given the tensor `values`, this operation returns a rank 1 histogram counting
-// the number of entries in `values` that fall into every bin. The bins are
-// equal width and determined by the arguments `value_range` and `nbins`.
+// Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
+// and a filter / kernel tensor of shape
+// `[filter_height, filter_width, in_channels, out_channels]`, this op
+// performs the following:
//
-// ```python
-// # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
-// nbins = 5
-// value_range = [0.0, 5.0]
-// new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
+// 1. Flattens the filter to a 2-D matrix with shape
+// `[filter_height * filter_width * in_channels, output_channels]`.
+// 2. Extracts image patches from the input tensor to form a *virtual*
+// tensor of shape `[batch, out_height, out_width,
+// filter_height * filter_width * in_channels]`.
+// 3. For each patch, right-multiplies the filter matrix and the image patch
+// vector.
//
-// with tf.get_default_session() as sess:
-// hist = tf.histogram_fixed_width(new_values, value_range, nbins=5)
-// variables.global_variables_initializer().run()
-// sess.run(hist) => [2, 1, 1, 0, 2]
-// ```
+// In detail, with the default NHWC format,
+//
+// output[b, i, j, k] =
+// sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
+// filter[di, dj, q, k]
+//
+// Must have `strides[0] = strides[3] = 1`. For the most common case of the same
+// horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
//
// Arguments:
-// values: Numeric `Tensor`.
-// value_range: Shape [2] `Tensor` of same `dtype` as `values`.
-// values <= value_range[0] will be mapped to hist[0],
-// values >= value_range[1] will be mapped to hist[-1].
-// nbins: Scalar `int32 Tensor`. Number of histogram bins.
+// input: A 4-D tensor. The dimension order is interpreted according to the value
+// of `data_format`, see below for details.
+// filter: A 4-D tensor of shape
+// `[filter_height, filter_width, in_channels, out_channels]`
+// strides: 1-D tensor of length 4. The stride of the sliding window for each
+// dimension of `input`. The dimension order is determined by the value of
+// `data_format`, see below for details.
+// padding: The type of padding algorithm to use.
//
-// Returns A 1-D `Tensor` holding histogram of values.
-func HistogramFixedWidth(scope *Scope, values tf.Output, value_range tf.Output, nbins tf.Output, optional ...HistogramFixedWidthAttr) (out tf.Output) {
+// Returns A 4-D tensor. The dimension order is determined by the value of
+// `data_format`, see below for details.
+func Conv2D(scope *Scope, input tf.Output, filter tf.Output, strides []int64, padding string, optional ...Conv2DAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"strides": strides, "padding": padding}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "HistogramFixedWidth",
+ Type: "Conv2D",
Input: []tf.Input{
- values, value_range, nbins,
+ input, filter,
},
Attrs: attrs,
}
@@ -24016,99 +23652,51 @@ func HistogramFixedWidth(scope *Scope, values tf.Output, value_range tf.Output,
return op.Output(0)
}
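+
+// An illustrative sketch (assumed names): convolving NHWC `images` with a
+// pre-built `filter` of shape [fh, fw, inC, outC], stride 1, SAME padding:
+//
+//	out := op.Conv2D(root, images, filter,
+//		[]int64{1, 1, 1, 1}, "SAME", op.Conv2DDataFormat("NHWC"))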
-// Quantized Batch normalization.
-//
-// This op is deprecated and will be removed in the future. Prefer
-// `tf.nn.batch_normalization`.
-//
-// Arguments:
-// t: A 4D input Tensor.
-// t_min: The value represented by the lowest quantized input.
-// t_max: The value represented by the highest quantized input.
-// m: A 1D mean Tensor with size matching the last dimension of t.
-// This is the first output from tf.nn.moments,
-// or a saved moving average thereof.
-// m_min: The value represented by the lowest quantized mean.
-// m_max: The value represented by the highest quantized mean.
-// v: A 1D variance Tensor with size matching the last dimension of t.
-// This is the second output from tf.nn.moments,
-// or a saved moving average thereof.
-// v_min: The value represented by the lowest quantized variance.
-// v_max: The value represented by the highest quantized variance.
-// beta: A 1D beta Tensor with size matching the last dimension of t.
-// An offset to be added to the normalized tensor.
-// beta_min: The value represented by the lowest quantized offset.
-// beta_max: The value represented by the highest quantized offset.
-// gamma: A 1D gamma Tensor with size matching the last dimension of t.
-// If "scale_after_normalization" is true, this tensor will be multiplied
-// with the normalized tensor.
-// gamma_min: The value represented by the lowest quantized gamma.
-// gamma_max: The value represented by the highest quantized gamma.
-//
-// variance_epsilon: A small float number to avoid dividing by 0.
-// scale_after_normalization: A bool indicating whether the resulted tensor
-// needs to be multiplied with gamma.
-func QuantizedBatchNormWithGlobalNormalization(scope *Scope, t tf.Output, t_min tf.Output, t_max tf.Output, m tf.Output, m_min tf.Output, m_max tf.Output, v tf.Output, v_min tf.Output, v_max tf.Output, beta tf.Output, beta_min tf.Output, beta_max tf.Output, gamma tf.Output, gamma_min tf.Output, gamma_max tf.Output, out_type tf.DataType, variance_epsilon float32, scale_after_normalization bool) (result tf.Output, result_min tf.Output, result_max tf.Output) {
- if scope.Err() != nil {
- return
- }
- attrs := map[string]interface{}{"out_type": out_type, "variance_epsilon": variance_epsilon, "scale_after_normalization": scale_after_normalization}
- opspec := tf.OpSpec{
- Type: "QuantizedBatchNormWithGlobalNormalization",
- Input: []tf.Input{
- t, t_min, t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max,
- },
- Attrs: attrs,
+// FakeQuantWithMinMaxArgsAttr is an optional argument to FakeQuantWithMinMaxArgs.
+type FakeQuantWithMinMaxArgsAttr func(optionalAttr)
+
+// FakeQuantWithMinMaxArgsMin sets the optional min attribute to value.
+// If not specified, defaults to -6
+func FakeQuantWithMinMaxArgsMin(value float32) FakeQuantWithMinMaxArgsAttr {
+ return func(m optionalAttr) {
+ m["min"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
}
-// Add all input tensors element wise.
-//
-// Arguments:
-// inputs: Must all be the same size and shape.
-func AddN(scope *Scope, inputs []tf.Output) (sum tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "AddN",
- Input: []tf.Input{
- tf.OutputList(inputs),
- },
+// FakeQuantWithMinMaxArgsMax sets the optional max attribute to value.
+// If not specified, defaults to 6
+func FakeQuantWithMinMaxArgsMax(value float32) FakeQuantWithMinMaxArgsAttr {
+ return func(m optionalAttr) {
+ m["max"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// MaxAttr is an optional argument to Max.
-type MaxAttr func(optionalAttr)
+// FakeQuantWithMinMaxArgsNumBits sets the optional num_bits attribute to value.
+// If not specified, defaults to 8
+func FakeQuantWithMinMaxArgsNumBits(value int64) FakeQuantWithMinMaxArgsAttr {
+ return func(m optionalAttr) {
+ m["num_bits"] = value
+ }
+}
-// MaxKeepDims sets the optional keep_dims attribute to value.
-//
-// value: If true, retain reduced dimensions with length 1.
+// FakeQuantWithMinMaxArgsNarrowRange sets the optional narrow_range attribute to value.
// If not specified, defaults to false
-func MaxKeepDims(value bool) MaxAttr {
+func FakeQuantWithMinMaxArgsNarrowRange(value bool) FakeQuantWithMinMaxArgsAttr {
return func(m optionalAttr) {
- m["keep_dims"] = value
+ m["narrow_range"] = value
}
}
-// Computes the maximum of elements across dimensions of a tensor.
-//
-// Reduces `input` along the dimensions given in `axis`. Unless
-// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
-// `axis`. If `keep_dims` is true, the reduced dimensions are
-// retained with length 1.
+// Fake-quantize the 'inputs' tensor, type float to 'outputs' tensor of same type.
//
-// Arguments:
-// input: The tensor to reduce.
-// axis: The dimensions to reduce. Must be in the range
-// `[-rank(input), rank(input))`.
+// Attributes `[min; max]` define the clamping range for the `inputs` data.
+// `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`
+// when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and
+// then de-quantized and output as floats in `[min; max]` interval.
+// `num_bits` is the bitwidth of the quantization; between 2 and 8, inclusive.
//
-// Returns The reduced tensor.
-func Max(scope *Scope, input tf.Output, axis tf.Output, optional ...MaxAttr) (output tf.Output) {
+// Quantization is called fake since the output is still in floating point.
+func FakeQuantWithMinMaxArgs(scope *Scope, inputs tf.Output, optional ...FakeQuantWithMinMaxArgsAttr) (outputs tf.Output) {
if scope.Err() != nil {
return
}
@@ -24117,9 +23705,9 @@ func Max(scope *Scope, input tf.Output, axis tf.Output, optional ...MaxAttr) (ou
a(attrs)
}
opspec := tf.OpSpec{
- Type: "Max",
+ Type: "FakeQuantWithMinMaxArgs",
Input: []tf.Input{
- input, axis,
+ inputs,
},
Attrs: attrs,
}
@@ -24127,59 +23715,67 @@ func Max(scope *Scope, input tf.Output, axis tf.Output, optional ...MaxAttr) (ou
return op.Output(0)
}
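+
+// An illustrative sketch (assumed names): fake-quantizing a float tensor `x`
+// to 8 bits over the clamping range [-1; 1]:
+//
+//	q := op.FakeQuantWithMinMaxArgs(root, x,
+//		op.FakeQuantWithMinMaxArgsMin(-1),
+//		op.FakeQuantWithMinMaxArgsMax(1),
+//		op.FakeQuantWithMinMaxArgsNumBits(8))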
-// Cast x of type SrcT to y of DstT.
-func Cast(scope *Scope, x tf.Output, DstT tf.DataType) (y tf.Output) {
- if scope.Err() != nil {
- return
- }
- attrs := map[string]interface{}{"DstT": DstT}
- opspec := tf.OpSpec{
- Type: "Cast",
- Input: []tf.Input{
- x,
- },
- Attrs: attrs,
+// StageAttr is an optional argument to Stage.
+type StageAttr func(optionalAttr)
+
+// StageCapacity sets the optional capacity attribute to value.
+//
+// value: Maximum number of elements in the Staging Area. If > 0, inserts
+// on the container will block when the capacity is reached.
+// If not specified, defaults to 0
+//
+// REQUIRES: value >= 0
+func StageCapacity(value int64) StageAttr {
+ return func(m optionalAttr) {
+ m["capacity"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// Returns the truth value of x AND y element-wise.
+// StageMemoryLimit sets the optional memory_limit attribute to value.
//
-// *NOTE*: `LogicalAnd` supports broadcasting. More about broadcasting
-// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-func LogicalAnd(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "LogicalAnd",
- Input: []tf.Input{
- x, y,
- },
+// value: The maximum number of bytes allowed for Tensors in the Staging Area.
+// If > 0, inserts will block until sufficient space is available.
+// If not specified, defaults to 0
+//
+// REQUIRES: value >= 0
+func StageMemoryLimit(value int64) StageAttr {
+ return func(m optionalAttr) {
+ m["memory_limit"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// ComplexAbsAttr is an optional argument to ComplexAbs.
-type ComplexAbsAttr func(optionalAttr)
+// StageContainer sets the optional container attribute to value.
+//
+// value: If non-empty, this queue is placed in the given container. Otherwise,
+// a default container is used.
+// If not specified, defaults to ""
+func StageContainer(value string) StageAttr {
+ return func(m optionalAttr) {
+ m["container"] = value
+ }
+}
-// ComplexAbsTout sets the optional Tout attribute to value.
-// If not specified, defaults to DT_FLOAT
-func ComplexAbsTout(value tf.DataType) ComplexAbsAttr {
+// StageSharedName sets the optional shared_name attribute to value.
+//
+// value: This name must match the name used by the corresponding Unstage Op.
+// If not specified, defaults to ""
+func StageSharedName(value string) StageAttr {
return func(m optionalAttr) {
- m["Tout"] = value
+ m["shared_name"] = value
}
}
-// Computes the complex absolute value of a tensor.
+// Stage values similar to a lightweight Enqueue.
//
-// Given a tensor `x` of complex numbers, this operation returns a tensor of type
-// `float` or `double` that is the absolute value of each element in `x`. All
-// elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute
-// value is computed as \\( \sqrt{a^2 + b^2}\\).
-func ComplexAbs(scope *Scope, x tf.Output, optional ...ComplexAbsAttr) (y tf.Output) {
+// The basic functionality of this Op is similar to a queue, but with many
+// fewer capabilities and options. This Op is optimized for performance.
+//
+// Arguments:
+// values: a list of tensors
+// dtypes: A list of data types that inserted values should adhere to.
+//
+// Returns the created operation.
+func Stage(scope *Scope, values []tf.Output, optional ...StageAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
@@ -24188,76 +23784,60 @@ func ComplexAbs(scope *Scope, x tf.Output, optional ...ComplexAbsAttr) (y tf.Out
a(attrs)
}
opspec := tf.OpSpec{
- Type: "ComplexAbs",
+ Type: "Stage",
Input: []tf.Input{
- x,
+ tf.OutputList(values),
},
Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// Computes the reciprocal of x element-wise.
-//
-// I.e., \\(y = 1 / x\\).
-func Inv(scope *Scope, x tf.Output) (y tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "Inv",
- Input: []tf.Input{
- x,
- },
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
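+
+// An illustrative sketch (assumed names): staging two tensors `a` and `b` into
+// an area bounded to 10 elements:
+//
+//	stage := op.Stage(root, []tf.Output{a, b}, op.StageCapacity(10))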
-// OrderedMapClearAttr is an optional argument to OrderedMapClear.
-type OrderedMapClearAttr func(optionalAttr)
+// StagePeekAttr is an optional argument to StagePeek.
+type StagePeekAttr func(optionalAttr)
-// OrderedMapClearCapacity sets the optional capacity attribute to value.
+// StagePeekCapacity sets the optional capacity attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
-func OrderedMapClearCapacity(value int64) OrderedMapClearAttr {
+func StagePeekCapacity(value int64) StagePeekAttr {
return func(m optionalAttr) {
m["capacity"] = value
}
}
-// OrderedMapClearMemoryLimit sets the optional memory_limit attribute to value.
+// StagePeekMemoryLimit sets the optional memory_limit attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
-func OrderedMapClearMemoryLimit(value int64) OrderedMapClearAttr {
+func StagePeekMemoryLimit(value int64) StagePeekAttr {
return func(m optionalAttr) {
m["memory_limit"] = value
}
}
-// OrderedMapClearContainer sets the optional container attribute to value.
+// StagePeekContainer sets the optional container attribute to value.
// If not specified, defaults to ""
-func OrderedMapClearContainer(value string) OrderedMapClearAttr {
+func StagePeekContainer(value string) StagePeekAttr {
return func(m optionalAttr) {
m["container"] = value
}
}
-// OrderedMapClearSharedName sets the optional shared_name attribute to value.
+// StagePeekSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
-func OrderedMapClearSharedName(value string) OrderedMapClearAttr {
+func StagePeekSharedName(value string) StagePeekAttr {
return func(m optionalAttr) {
m["shared_name"] = value
}
}
-// Op removes all elements in the underlying container.
+// Op peeks at the values at the specified index.
//
-// Returns the created operation.
-func OrderedMapClear(scope *Scope, dtypes []tf.DataType, optional ...OrderedMapClearAttr) (o *tf.Operation) {
+// If the underlying container does not contain sufficient elements, this op
+// will block until it does. This Op is optimized for performance.
+func StagePeek(scope *Scope, index tf.Output, dtypes []tf.DataType, optional ...StagePeekAttr) (values []tf.Output) {
if scope.Err() != nil {
return
}
@@ -24266,590 +23846,386 @@ func OrderedMapClear(scope *Scope, dtypes []tf.DataType, optional ...OrderedMapC
a(attrs)
}
opspec := tf.OpSpec{
- Type: "OrderedMapClear",
-
- Attrs: attrs,
- }
- return scope.AddOperation(opspec)
-}
-
-// Returns the element-wise max of two SparseTensors.
-//
-// Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
-//
-// Arguments:
-// a_indices: 2-D. `N x R` matrix with the indices of non-empty values in a
-// SparseTensor, in the canonical lexicographic ordering.
-// a_values: 1-D. `N` non-empty values corresponding to `a_indices`.
-// a_shape: 1-D. Shape of the input SparseTensor.
-// b_indices: counterpart to `a_indices` for the other operand.
-// b_values: counterpart to `a_values` for the other operand; must be of the same dtype.
-// b_shape: counterpart to `a_shape` for the other operand; the two shapes must be equal.
-//
-// Returns 2-D. The indices of the output SparseTensor.1-D. The values of the output SparseTensor.
-func SparseSparseMaximum(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b_indices tf.Output, b_values tf.Output, b_shape tf.Output) (output_indices tf.Output, output_values tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "SparseSparseMaximum",
+ Type: "StagePeek",
Input: []tf.Input{
- a_indices, a_values, a_shape, b_indices, b_values, b_shape,
+ index,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1)
-}
-
-// Computes the gradient for the inverse of `x` wrt its input.
-//
-// Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`
-// is the corresponding input gradient.
-func InvGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
if scope.Err() != nil {
return
}
- opspec := tf.OpSpec{
- Type: "InvGrad",
- Input: []tf.Input{
- y, dy,
- },
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// Computes the reciprocal of x element-wise.
-//
-// I.e., \\(y = 1 / x\\).
-func Reciprocal(scope *Scope, x tf.Output) (y tf.Output) {
- if scope.Err() != nil {
+ var idx int
+ var err error
+ if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
+ scope.UpdateErr("StagePeek", err)
return
}
- opspec := tf.OpSpec{
- Type: "Reciprocal",
- Input: []tf.Input{
- x,
- },
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return values
}
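+
+// An illustrative sketch (assumed names): peeking at staged values without
+// removing them, where `index` is a scalar int32 tensor:
+//
+//	vals := op.StagePeek(root, index, []tf.DataType{tf.Float, tf.Int32})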
-// Computes exponential linear: `exp(features) - 1` if < 0, `features` otherwise.
-//
-// See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
-// ](http://arxiv.org/abs/1511.07289)
-func Elu(scope *Scope, features tf.Output) (activations tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "Elu",
- Input: []tf.Input{
- features,
- },
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
+// Conv3DBackpropInputV2Attr is an optional argument to Conv3DBackpropInputV2.
+type Conv3DBackpropInputV2Attr func(optionalAttr)
-// Computes square of x element-wise.
+// Conv3DBackpropInputV2DataFormat sets the optional data_format attribute to value.
//
-// I.e., \\(y = x * x = x^2\\).
-func Square(scope *Scope, x tf.Output) (y tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "Square",
- Input: []tf.Input{
- x,
- },
+// value: The data format of the input and output data. With the
+// default format "NDHWC", the data is stored in the order of:
+// [batch, in_depth, in_height, in_width, in_channels].
+// Alternatively, the format could be "NCDHW", the data storage order is:
+// [batch, in_channels, in_depth, in_height, in_width].
+// If not specified, defaults to "NDHWC"
+func Conv3DBackpropInputV2DataFormat(value string) Conv3DBackpropInputV2Attr {
+ return func(m optionalAttr) {
+ m["data_format"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// Returns element-wise remainder of division. When `x < 0` xor `y < 0` is
-//
-// true, this follows Python semantics in that the result here is consistent
-// with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`.
+// Conv3DBackpropInputV2Dilations sets the optional dilations attribute to value.
//
-// *NOTE*: `FloorMod` supports broadcasting. More about broadcasting
-// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-func FloorMod(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "FloorMod",
- Input: []tf.Input{
- x, y,
- },
+// value: 1-D tensor of length 5. The dilation factor for each dimension of
+// `input`. If set to k > 1, there will be k-1 skipped cells between each
+// filter element on that dimension. The dimension order is determined by the
+// value of `data_format`, see above for details. Dilations in the batch and
+// depth dimensions must be 1.
+// If not specified, defaults to [1, 1, 1, 1, 1]
+func Conv3DBackpropInputV2Dilations(value []int64) Conv3DBackpropInputV2Attr {
+ return func(m optionalAttr) {
+ m["dilations"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// Computes square root of x element-wise.
+// Computes the gradients of 3-D convolution with respect to the input.
//
-// I.e., \\(y = \sqrt{x} = x^{1/2}\\).
-func Sqrt(scope *Scope, x tf.Output) (y tf.Output) {
+// Arguments:
+// input_sizes: An integer vector representing the tensor shape of `input`,
+// where `input` is a 5-D
+// `[batch, depth, rows, cols, in_channels]` tensor.
+// filter: Shape `[depth, rows, cols, in_channels, out_channels]`.
+// `in_channels` must match between `input` and `filter`.
+// out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
+// out_channels]`.
+// strides: 1-D tensor of length 5. The stride of the sliding window for each
+// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
+// padding: The type of padding algorithm to use.
+func Conv3DBackpropInputV2(scope *Scope, input_sizes tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv3DBackpropInputV2Attr) (output tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"strides": strides, "padding": padding}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "Sqrt",
+ Type: "Conv3DBackpropInputV2",
Input: []tf.Input{
- x,
+ input_sizes, filter, out_backprop,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
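+
+// An illustrative sketch (assumed names): recovering the input gradient of a
+// stride-1, SAME-padded 3-D convolution:
+//
+//	grad := op.Conv3DBackpropInputV2(root, inputSizes, filter, outBackprop,
+//		[]int64{1, 1, 1, 1, 1}, "SAME")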
-// MatrixInverseAttr is an optional argument to MatrixInverse.
-type MatrixInverseAttr func(optionalAttr)
+// DepthToSpaceAttr is an optional argument to DepthToSpace.
+type DepthToSpaceAttr func(optionalAttr)
-// MatrixInverseAdjoint sets the optional adjoint attribute to value.
-// If not specified, defaults to false
-func MatrixInverseAdjoint(value bool) MatrixInverseAttr {
+// DepthToSpaceDataFormat sets the optional data_format attribute to value.
+// If not specified, defaults to "NHWC"
+func DepthToSpaceDataFormat(value string) DepthToSpaceAttr {
return func(m optionalAttr) {
- m["adjoint"] = value
+ m["data_format"] = value
}
}
-// Computes the inverse of one or more square invertible matrices or their
+// DepthToSpace for tensors of type T.
//
-// adjoints (conjugate transposes).
+// Rearranges data from depth into blocks of spatial data.
+// This is the reverse transformation of SpaceToDepth. More specifically,
+// this op outputs a copy of the input tensor where values from the `depth`
+// dimension are moved in spatial blocks to the `height` and `width` dimensions.
+// The attr `block_size` indicates the input block size and how the data is moved.
//
-// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
-// form square matrices. The output is a tensor of the same shape as the input
-// containing the inverse for all input submatrices `[..., :, :]`.
+// * Chunks of data of size `block_size * block_size` from depth are rearranged
+// into non-overlapping blocks of size `block_size x block_size`
+// * The width of the output tensor is `input_width * block_size`, whereas the
+// height is `input_height * block_size`.
+// * The Y, X coordinates within each block of the output image are determined
+// by the high order component of the input channel index.
+// * The depth of the input tensor must be divisible by
+// `block_size * block_size`.
//
-// The op uses LU decomposition with partial pivoting to compute the inverses.
+// The `data_format` attr specifies the layout of the input and output tensors
+// with the following options:
+// "NHWC": `[ batch, height, width, channels ]`
+// "NCHW": `[ batch, channels, height, width ]`
+// "NCHW_VECT_C":
+// `qint8 [ batch, channels / 4, height, width, 4 ]`
//
-// If a matrix is not invertible there is no guarantee what the op does. It
-// may detect the condition and raise an exception or it may simply return a
-// garbage result.
+// It is useful to consider the operation as transforming a 6-D Tensor.
+// e.g. for data_format = NHWC,
+// Each element in the input tensor can be specified via 6 coordinates,
+// ordered by decreasing memory layout significance as:
+// n,iY,iX,bY,bX,oC (where n=batch index, iX, iY means X or Y coordinates
+// within the input image, bX, bY means coordinates
+// within the output block, oC means output channels).
+// The output would be the input transposed to the following layout:
+// n,iY,bY,iX,bX,oC
//
-// Arguments:
-// input: Shape is `[..., M, M]`.
+// This operation is useful for resizing the activations between convolutions
+// (but keeping all data), e.g. instead of pooling. It is also useful for training
+// purely convolutional models.
//
-// Returns Shape is `[..., M, M]`.
+// For example, given an input of shape `[1, 1, 1, 4]`, data_format = "NHWC" and
+// block_size = 2:
//
-// @compatibility(numpy)
-// Equivalent to np.linalg.inv
-// @end_compatibility
-func MatrixInverse(scope *Scope, input tf.Output, optional ...MatrixInverseAttr) (output tf.Output) {
- if scope.Err() != nil {
- return
- }
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
- opspec := tf.OpSpec{
- Type: "MatrixInverse",
- Input: []tf.Input{
- input,
- },
- Attrs: attrs,
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// Computes the gradient for the sqrt of `x` wrt its input.
+// ```
+// x = [[[[1, 2, 3, 4]]]]
//
-// Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and `dy`
-// is the corresponding input gradient.
-func SqrtGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "SqrtGrad",
- Input: []tf.Input{
- y, dy,
- },
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// Inserts a dimension of 1 into a tensor's shape.
+// ```
//
-// Given a tensor `input`, this operation inserts a dimension of 1 at the
-// dimension index `axis` of `input`'s shape. The dimension index `axis` starts at
-// zero; if you specify a negative number for `axis` it is counted backward from
-// the end.
+// This operation will output a tensor of shape `[1, 2, 2, 1]`:
//
-// This operation is useful if you want to add a batch dimension to a single
-// element. For example, if you have a single image of shape `[height, width,
-// channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
-// which will make the shape `[1, height, width, channels]`.
+// ```
+// [[[[1], [2]],
+// [[3], [4]]]]
+// ```
//
-// Other examples:
+// Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`;
+// the corresponding output will have 2x2 elements and a depth of
+// 1 channel (1 = `4 / (block_size * block_size)`).
+// The output element shape is `[2, 2, 1]`.
+//
+// For an input tensor with larger depth, e.g. of shape `[1, 1, 1, 12]`:
//
// ```
-// # 't' is a tensor of shape [2]
-// shape(expand_dims(t, 0)) ==> [1, 2]
-// shape(expand_dims(t, 1)) ==> [2, 1]
-// shape(expand_dims(t, -1)) ==> [2, 1]
+// x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
+// ```
+//
+// This operation, for a block size of 2, will return the following tensor of shape
+// `[1, 2, 2, 3]`
//
-// # 't2' is a tensor of shape [2, 3, 5]
-// shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
-// shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
-// shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
// ```
+// [[[[1, 2, 3], [4, 5, 6]],
+// [[7, 8, 9], [10, 11, 12]]]]
//
-// This operation requires that:
+// ```
//
-// `-1-input.dims() <= dim <= input.dims()`
+// Similarly, for the following input of shape `[1, 2, 2, 4]` and a block size of 2:
//
-// This operation is related to `squeeze()`, which removes dimensions of
-// size 1.
+// ```
+// x = [[[[1, 2, 3, 4],
+// [5, 6, 7, 8]],
+// [[9, 10, 11, 12],
+// [13, 14, 15, 16]]]]
+// ```
//
-// Arguments:
+// the operator will return the following tensor of shape `[1, 4, 4, 1]`:
//
-// axis: 0-D (scalar). Specifies the dimension index at which to
-// expand the shape of `input`. Must be in the range
-// `[-rank(input) - 1, rank(input)]`.
+// ```
+// x = [[[ [1], [2], [5], [6]],
+// [ [3], [4], [7], [8]],
+// [ [9], [10], [13], [14]],
+// [ [11], [12], [15], [16]]]]
//
-// Returns Contains the same data as `input`, but its shape has an additional
-// dimension of size 1 added.
-func ExpandDims(scope *Scope, input tf.Output, axis tf.Output) (output tf.Output) {
+// ```
+//
+// Arguments:
+//
+// block_size: The size of the spatial block, same as in SpaceToDepth.
+func DepthToSpace(scope *Scope, input tf.Output, block_size int64, optional ...DepthToSpaceAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"block_size": block_size}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "ExpandDims",
+ Type: "DepthToSpace",
Input: []tf.Input{
- input, axis,
+ input,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
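+
+// A minimal, self-contained usage sketch for DepthToSpace (illustrative, not
+// generated; it assumes the standard tensorflow/go bindings and mirrors the
+// `[1, 1, 1, 4]` example above):
+//
+// ```
+// package main
+//
+// import (
+// 	"fmt"
+//
+// 	tf "github.com/tensorflow/tensorflow/tensorflow/go"
+// 	"github.com/tensorflow/tensorflow/tensorflow/go/op"
+// )
+//
+// func main() {
+// 	s := op.NewScope()
+// 	x := op.Const(s, [][][][]float32{{{{1, 2, 3, 4}}}}) // shape [1, 1, 1, 4]
+// 	y := op.DepthToSpace(s, x, 2)                       // shape [1, 2, 2, 1]
+// 	graph, err := s.Finalize()
+// 	if err != nil {
+// 		panic(err)
+// 	}
+// 	sess, err := tf.NewSession(graph, nil)
+// 	if err != nil {
+// 		panic(err)
+// 	}
+// 	out, err := sess.Run(nil, []tf.Output{y}, nil)
+// 	if err != nil {
+// 		panic(err)
+// 	}
+// 	fmt.Println(out[0].Value()) // [[[[1] [2]] [[3] [4]]]]
+// }
+// ```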
-// AllAttr is an optional argument to All.
-type AllAttr func(optionalAttr)
+// MapStageAttr is an optional argument to MapStage.
+type MapStageAttr func(optionalAttr)
-// AllKeepDims sets the optional keep_dims attribute to value.
+// MapStageCapacity sets the optional capacity attribute to value.
//
-// value: If true, retain reduced dimensions with length 1.
-// If not specified, defaults to false
-func AllKeepDims(value bool) AllAttr {
+// value: Maximum number of elements in the Staging Area. If > 0, inserts
+// on the container will block when the capacity is reached.
+// If not specified, defaults to 0
+//
+// REQUIRES: value >= 0
+func MapStageCapacity(value int64) MapStageAttr {
return func(m optionalAttr) {
- m["keep_dims"] = value
+ m["capacity"] = value
}
}
-// Computes the "logical and" of elements across dimensions of a tensor.
-//
-// Reduces `input` along the dimensions given in `axis`. Unless
-// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
-// `axis`. If `keep_dims` is true, the reduced dimensions are
-// retained with length 1.
-//
-// Arguments:
-// input: The tensor to reduce.
-// axis: The dimensions to reduce. Must be in the range
-// `[-rank(input), rank(input))`.
+// MapStageMemoryLimit sets the optional memory_limit attribute to value.
+// If not specified, defaults to 0
//
-// Returns The reduced tensor.
-func All(scope *Scope, input tf.Output, axis tf.Output, optional ...AllAttr) (output tf.Output) {
- if scope.Err() != nil {
- return
- }
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
- opspec := tf.OpSpec{
- Type: "All",
- Input: []tf.Input{
- input, axis,
- },
- Attrs: attrs,
+// REQUIRES: value >= 0
+func MapStageMemoryLimit(value int64) MapStageAttr {
+ return func(m optionalAttr) {
+ m["memory_limit"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// CTCBeamSearchDecoderAttr is an optional argument to CTCBeamSearchDecoder.
-type CTCBeamSearchDecoderAttr func(optionalAttr)
-
-// CTCBeamSearchDecoderMergeRepeated sets the optional merge_repeated attribute to value.
+// MapStageContainer sets the optional container attribute to value.
//
-// value: If true, merge repeated classes in output.
-// If not specified, defaults to true
-func CTCBeamSearchDecoderMergeRepeated(value bool) CTCBeamSearchDecoderAttr {
+// value: If non-empty, this queue is placed in the given container. Otherwise,
+// a default container is used.
+// If not specified, defaults to ""
+func MapStageContainer(value string) MapStageAttr {
return func(m optionalAttr) {
- m["merge_repeated"] = value
+ m["container"] = value
}
}
-// Performs beam search decoding on the logits given in input.
+// MapStageSharedName sets the optional shared_name attribute to value.
//
-// A note about the attribute merge_repeated: For the beam search decoder,
-// this means that if consecutive entries in a beam are the same, only
-// the first of these is emitted. That is, when the top path is "A B B B B",
-// "A B" is returned if merge_repeated = True but "A B B B B" is
-// returned if merge_repeated = False.
+// value: This name must match the name used by the corresponding Unstage Op.
+// If not specified, defaults to ""
+func MapStageSharedName(value string) MapStageAttr {
+ return func(m optionalAttr) {
+ m["shared_name"] = value
+ }
+}
+
+// Stage (key, values) in the underlying container which behaves like a hashtable.
//
// Arguments:
-// inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
-// sequence_length: A vector containing sequence lengths, size `(batch)`.
-// beam_width: A scalar >= 0 (beam search beam width).
-// top_paths: A scalar >= 0, <= beam_width (controls output size).
+// key: A scalar of type int64.
//
-// Returns A list (length: top_paths) of indices matrices. Matrix j,
-// size `(total_decoded_outputs[j] x 2)`, has indices of a
-// `SparseTensor<int64, 2>`. The rows store: [batch, time].A list (length: top_paths) of values vectors. Vector j,
-// size `(length total_decoded_outputs[j])`, has the values of a
-// `SparseTensor<int64, 2>`. The vector stores the decoded classes for beam j.A list (length: top_paths) of shape vector. Vector j,
-// size `(2)`, stores the shape of the decoded `SparseTensor[j]`.
-// Its values are: `[batch_size, max_decoded_length[j]]`.A matrix, shaped: `(batch_size x top_paths)`. The
-// sequence log-probabilities.
-func CTCBeamSearchDecoder(scope *Scope, inputs tf.Output, sequence_length tf.Output, beam_width int64, top_paths int64, optional ...CTCBeamSearchDecoderAttr) (decoded_indices []tf.Output, decoded_values []tf.Output, decoded_shape []tf.Output, log_probability tf.Output) {
+// values: A list of tensors.
+// dtypes: A list of data types that inserted values should adhere to.
+//
+//
+// Returns the created operation.
+func MapStage(scope *Scope, key tf.Output, indices tf.Output, values []tf.Output, dtypes []tf.DataType, optional ...MapStageAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"beam_width": beam_width, "top_paths": top_paths}
+ attrs := map[string]interface{}{"dtypes": dtypes}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "CTCBeamSearchDecoder",
+ Type: "MapStage",
Input: []tf.Input{
- inputs, sequence_length,
+ key, indices, tf.OutputList(values),
},
Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- if scope.Err() != nil {
- return
- }
- var idx int
- var err error
- if decoded_indices, idx, err = makeOutputList(op, idx, "decoded_indices"); err != nil {
- scope.UpdateErr("CTCBeamSearchDecoder", err)
- return
- }
- if decoded_values, idx, err = makeOutputList(op, idx, "decoded_values"); err != nil {
- scope.UpdateErr("CTCBeamSearchDecoder", err)
- return
- }
- if decoded_shape, idx, err = makeOutputList(op, idx, "decoded_shape"); err != nil {
- scope.UpdateErr("CTCBeamSearchDecoder", err)
- return
- }
- log_probability = op.Output(idx)
- return decoded_indices, decoded_values, decoded_shape, log_probability
-}
-
-// Computes reciprocal of square root of x element-wise.
-//
-// I.e., \\(y = 1 / \sqrt{x}\\).
-func Rsqrt(scope *Scope, x tf.Output) (y tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "Rsqrt",
- Input: []tf.Input{
- x,
- },
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
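+
+// A hedged usage sketch for MapStage (illustrative; it assumes a scope from
+// op.NewScope() and that the int32 `indices` select which of the `dtypes`
+// slots the supplied `values` fill):
+//
+// ```
+// s := op.NewScope()
+// key := op.Const(s, int64(1))
+// indices := op.Const(s, []int32{0})
+// val := op.Const(s, []float32{1, 2, 3})
+// stage := op.MapStage(s, key, indices, []tf.Output{val}, []tf.DataType{tf.Float})
+// // Run as a target: sess.Run(nil, nil, []*tf.Operation{stage})
+// ```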
-// RecordInputAttr is an optional argument to RecordInput.
-type RecordInputAttr func(optionalAttr)
-
-// RecordInputFileRandomSeed sets the optional file_random_seed attribute to value.
-//
-// value: Random seeds used to produce randomized records.
-// If not specified, defaults to 301
-func RecordInputFileRandomSeed(value int64) RecordInputAttr {
- return func(m optionalAttr) {
- m["file_random_seed"] = value
- }
-}
+// MapPeekAttr is an optional argument to MapPeek.
+type MapPeekAttr func(optionalAttr)
-// RecordInputFileShuffleShiftRatio sets the optional file_shuffle_shift_ratio attribute to value.
-//
-// value: Shifts the list of files after the list is randomly
-// shuffled.
+// MapPeekCapacity sets the optional capacity attribute to value.
// If not specified, defaults to 0
-func RecordInputFileShuffleShiftRatio(value float32) RecordInputAttr {
- return func(m optionalAttr) {
- m["file_shuffle_shift_ratio"] = value
- }
-}
-
-// RecordInputFileBufferSize sets the optional file_buffer_size attribute to value.
//
-// value: The randomization shuffling buffer.
-// If not specified, defaults to 10000
-func RecordInputFileBufferSize(value int64) RecordInputAttr {
+// REQUIRES: value >= 0
+func MapPeekCapacity(value int64) MapPeekAttr {
return func(m optionalAttr) {
- m["file_buffer_size"] = value
+ m["capacity"] = value
}
}
-// RecordInputFileParallelism sets the optional file_parallelism attribute to value.
+// MapPeekMemoryLimit sets the optional memory_limit attribute to value.
+// If not specified, defaults to 0
//
-// value: How many sstables are opened and concurrently iterated over.
-// If not specified, defaults to 16
-func RecordInputFileParallelism(value int64) RecordInputAttr {
+// REQUIRES: value >= 0
+func MapPeekMemoryLimit(value int64) MapPeekAttr {
return func(m optionalAttr) {
- m["file_parallelism"] = value
+ m["memory_limit"] = value
}
}
-// RecordInputBatchSize sets the optional batch_size attribute to value.
-//
-// value: The batch size.
-// If not specified, defaults to 32
-func RecordInputBatchSize(value int64) RecordInputAttr {
+// MapPeekContainer sets the optional container attribute to value.
+// If not specified, defaults to ""
+func MapPeekContainer(value string) MapPeekAttr {
return func(m optionalAttr) {
- m["batch_size"] = value
+ m["container"] = value
}
}
-// RecordInputCompressionType sets the optional compression_type attribute to value.
-//
-// value: The type of compression for the file. Currently ZLIB and
-// GZIP are supported. Defaults to none.
+// MapPeekSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
-func RecordInputCompressionType(value string) RecordInputAttr {
+func MapPeekSharedName(value string) MapPeekAttr {
return func(m optionalAttr) {
- m["compression_type"] = value
+ m["shared_name"] = value
}
}
-// Emits randomized records.
-//
-// Arguments:
-// file_pattern: Glob pattern for the data files.
+// Op peeks at the values at the specified key.
//
-// Returns A tensor of shape [batch_size].
-func RecordInput(scope *Scope, file_pattern string, optional ...RecordInputAttr) (records tf.Output) {
+// If the underlying container does not contain this key,
+// this op will block until it does.
+func MapPeek(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, optional ...MapPeekAttr) (values []tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"file_pattern": file_pattern}
+ attrs := map[string]interface{}{"dtypes": dtypes}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "RecordInput",
-
- Attrs: attrs,
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// Rounds the values of a tensor to the nearest integer, element-wise.
-//
-// Rounds half to even. Also known as banker's rounding. If you want to round
-// according to the current system rounding mode use std::rint.
-func Round(scope *Scope, x tf.Output) (y tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "Round",
+ Type: "MapPeek",
Input: []tf.Input{
- x,
+ key, indices,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// Generates values in an interval.
-//
-// A sequence of `num` evenly-spaced values are generated beginning at `start`.
-// If `num > 1`, the values in the sequence increase by `(stop - start) / (num - 1)`,
-// so that the last one is exactly `stop`.
-//
-// For example:
-//
-// ```
-// tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0 11.0 12.0]
-// ```
-//
-// Arguments:
-// start: First entry in the range.
-// stop: Last entry in the range.
-// num: Number of values to generate.
-//
-// Returns 1-D. The generated values.
-func LinSpace(scope *Scope, start tf.Output, stop tf.Output, num tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
- opspec := tf.OpSpec{
- Type: "LinSpace",
- Input: []tf.Input{
- start, stop, num,
- },
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// Computes natural logarithm of x element-wise.
-//
-// I.e., \\(y = \log_e x\\).
-func Log(scope *Scope, x tf.Output) (y tf.Output) {
- if scope.Err() != nil {
+ var idx int
+ var err error
+ if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
+ scope.UpdateErr("MapPeek", err)
return
}
- opspec := tf.OpSpec{
- Type: "Log",
- Input: []tf.Input{
- x,
- },
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return values
}
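+
+// A usage sketch for MapPeek (illustrative; `s`, `key` and `indices` as in the
+// MapStage sketch above). Peeking does not remove the staged values:
+//
+// ```
+// peeked := op.MapPeek(s, key, indices, []tf.DataType{tf.Float})
+// // peeked[0] carries the staged tensor; fetching it blocks until `key` exists.
+// ```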
-// ResizeBicubicAttr is an optional argument to ResizeBicubic.
-type ResizeBicubicAttr func(optionalAttr)
+// QueueCloseV2Attr is an optional argument to QueueCloseV2.
+type QueueCloseV2Attr func(optionalAttr)
-// ResizeBicubicAlignCorners sets the optional align_corners attribute to value.
+// QueueCloseV2CancelPendingEnqueues sets the optional cancel_pending_enqueues attribute to value.
//
-// value: If true, rescale input by (new_height - 1) / (height - 1), which
-// exactly aligns the 4 corners of images and resized images. If false, rescale
-// by new_height / height. Treat similarly the width dimension.
+// value: If true, all pending enqueue requests that are
+// blocked on the given queue will be canceled.
// If not specified, defaults to false
-func ResizeBicubicAlignCorners(value bool) ResizeBicubicAttr {
+func QueueCloseV2CancelPendingEnqueues(value bool) QueueCloseV2Attr {
return func(m optionalAttr) {
- m["align_corners"] = value
+ m["cancel_pending_enqueues"] = value
}
}
-// Resize `images` to `size` using bicubic interpolation.
+// Closes the given queue.
//
-// Input images can be of different types but output images are always float.
+// This operation signals that no more elements will be enqueued in the
+// given queue. Subsequent Enqueue(Many) operations will fail.
+// Subsequent Dequeue(Many) operations will continue to succeed if
+// sufficient elements remain in the queue. Subsequent Dequeue(Many)
+// operations that would block will fail immediately.
//
// Arguments:
-// images: 4-D with shape `[batch, height, width, channels]`.
-// size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The
-// new size for the images.
+// handle: The handle to a queue.
//
-// Returns 4-D with shape
-// `[batch, new_height, new_width, channels]`.
-func ResizeBicubic(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeBicubicAttr) (resized_images tf.Output) {
+// Returns the created operation.
+func QueueCloseV2(scope *Scope, handle tf.Output, optional ...QueueCloseV2Attr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
@@ -24858,146 +24234,153 @@ func ResizeBicubic(scope *Scope, images tf.Output, size tf.Output, optional ...R
a(attrs)
}
opspec := tf.OpSpec{
- Type: "ResizeBicubic",
+ Type: "QueueCloseV2",
Input: []tf.Input{
- images, size,
+ handle,
},
Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
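+
+// A usage sketch for QueueCloseV2 (illustrative; it assumes the generated
+// FIFOQueueV2 wrapper from this package for the queue handle):
+//
+// ```
+// s := op.NewScope()
+// q := op.FIFOQueueV2(s, []tf.DataType{tf.Float})
+// closeQ := op.QueueCloseV2(s, q, op.QueueCloseV2CancelPendingEnqueues(true))
+// // Run as a target: sess.Run(nil, nil, []*tf.Operation{closeQ})
+// ```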
-// Computes rectified linear 6 gradients for a Relu6 operation.
+// Forwards the value of an available tensor from `inputs` to `output`.
+//
+// `Merge` waits for at least one of the tensors in `inputs` to become available.
+// It is usually combined with `Switch` to implement branching.
+//
+// `Merge` forwards the first tensor to become available to `output`, and sets
+// `value_index` to its index in `inputs`.
//
// Arguments:
-// gradients: The backpropagated gradients to the corresponding Relu6 operation.
-// features: The features passed as input to the corresponding Relu6 operation, or
-// its output; using either one produces the same result.
+// inputs: The input tensors, exactly one of which will become available.
//
-// Returns The gradients:
-// `gradients * (features > 0) * (features < 6)`.
-func Relu6Grad(scope *Scope, gradients tf.Output, features tf.Output) (backprops tf.Output) {
+// Returns Will be set to the available input tensor, and the index of the
+// chosen input tensor in `inputs`.
+func Merge(scope *Scope, inputs []tf.Output) (output tf.Output, value_index tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Relu6Grad",
+ Type: "Merge",
Input: []tf.Input{
- gradients, features,
+ tf.OutputList(inputs),
},
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1)
}
-// Computes natural logarithm of (1 + x) element-wise.
+// MapUnstageAttr is an optional argument to MapUnstage.
+type MapUnstageAttr func(optionalAttr)
+
+// MapUnstageCapacity sets the optional capacity attribute to value.
+// If not specified, defaults to 0
//
-// I.e., \\(y = \log_e (1 + x)\\).
-func Log1p(scope *Scope, x tf.Output) (y tf.Output) {
- if scope.Err() != nil {
- return
+// REQUIRES: value >= 0
+func MapUnstageCapacity(value int64) MapUnstageAttr {
+ return func(m optionalAttr) {
+ m["capacity"] = value
}
- opspec := tf.OpSpec{
- Type: "Log1p",
- Input: []tf.Input{
- x,
- },
+}
+
+// MapUnstageMemoryLimit sets the optional memory_limit attribute to value.
+// If not specified, defaults to 0
+//
+// REQUIRES: value >= 0
+func MapUnstageMemoryLimit(value int64) MapUnstageAttr {
+ return func(m optionalAttr) {
+ m["memory_limit"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// Creates a dataset that emits each dim-0 slice of `components` once.
-func TensorSliceDataset(scope *Scope, components []tf.Output, output_shapes []tf.Shape) (handle tf.Output) {
- if scope.Err() != nil {
- return
+// MapUnstageContainer sets the optional container attribute to value.
+// If not specified, defaults to ""
+func MapUnstageContainer(value string) MapUnstageAttr {
+ return func(m optionalAttr) {
+ m["container"] = value
}
- attrs := map[string]interface{}{"output_shapes": output_shapes}
- opspec := tf.OpSpec{
- Type: "TensorSliceDataset",
- Input: []tf.Input{
- tf.OutputList(components),
- },
- Attrs: attrs,
+}
+
+// MapUnstageSharedName sets the optional shared_name attribute to value.
+// If not specified, defaults to ""
+func MapUnstageSharedName(value string) MapUnstageAttr {
+ return func(m optionalAttr) {
+ m["shared_name"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// Computes tan of x element-wise.
-func Tan(scope *Scope, x tf.Output) (y tf.Output) {
+// Op removes and returns the values associated with the key
+//
+// from the underlying container. If the underlying container
+// does not contain this key, the op will block until it does.
+func MapUnstage(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, optional ...MapUnstageAttr) (values []tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"dtypes": dtypes}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "Tan",
+ Type: "MapUnstage",
Input: []tf.Input{
- x,
+ key, indices,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// Computes hyperbolic cosine of x element-wise.
-func Cosh(scope *Scope, x tf.Output) (y tf.Output) {
if scope.Err() != nil {
return
}
- opspec := tf.OpSpec{
- Type: "Cosh",
- Input: []tf.Input{
- x,
- },
+ var idx int
+ var err error
+ if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
+ scope.UpdateErr("MapUnstage", err)
+ return
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return values
}
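+
+// A usage sketch for MapUnstage, completing the round trip begun in the
+// MapStage sketch above (illustrative; same assumed `s`, `key`, `indices`):
+//
+// ```
+// vals := op.MapUnstage(s, key, indices, []tf.DataType{tf.Float})
+// // vals[0] is the staged tensor; the (key, values) entry is removed.
+// ```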
-// MapClearAttr is an optional argument to MapClear.
-type MapClearAttr func(optionalAttr)
+// MapSizeAttr is an optional argument to MapSize.
+type MapSizeAttr func(optionalAttr)
-// MapClearCapacity sets the optional capacity attribute to value.
+// MapSizeCapacity sets the optional capacity attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
-func MapClearCapacity(value int64) MapClearAttr {
+func MapSizeCapacity(value int64) MapSizeAttr {
return func(m optionalAttr) {
m["capacity"] = value
}
}
-// MapClearMemoryLimit sets the optional memory_limit attribute to value.
+// MapSizeMemoryLimit sets the optional memory_limit attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
-func MapClearMemoryLimit(value int64) MapClearAttr {
+func MapSizeMemoryLimit(value int64) MapSizeAttr {
return func(m optionalAttr) {
m["memory_limit"] = value
}
}
-// MapClearContainer sets the optional container attribute to value.
+// MapSizeContainer sets the optional container attribute to value.
// If not specified, defaults to ""
-func MapClearContainer(value string) MapClearAttr {
+func MapSizeContainer(value string) MapSizeAttr {
return func(m optionalAttr) {
m["container"] = value
}
}
-// MapClearSharedName sets the optional shared_name attribute to value.
+// MapSizeSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
-func MapClearSharedName(value string) MapClearAttr {
+func MapSizeSharedName(value string) MapSizeAttr {
return func(m optionalAttr) {
m["shared_name"] = value
}
}
-// Op removes all elements in the underlying container.
-//
-// Returns the created operation.
-func MapClear(scope *Scope, dtypes []tf.DataType, optional ...MapClearAttr) (o *tf.Operation) {
+// Op returns the number of elements in the underlying container.
+func MapSize(scope *Scope, dtypes []tf.DataType, optional ...MapSizeAttr) (size tf.Output) {
if scope.Err() != nil {
return
}
@@ -25006,328 +24389,306 @@ func MapClear(scope *Scope, dtypes []tf.DataType, optional ...MapClearAttr) (o *
a(attrs)
}
opspec := tf.OpSpec{
- Type: "MapClear",
+ Type: "MapSize",
Attrs: attrs,
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// TensorArrayV2Attr is an optional argument to TensorArrayV2.
-type TensorArrayV2Attr func(optionalAttr)
+// MapIncompleteSizeAttr is an optional argument to MapIncompleteSize.
+type MapIncompleteSizeAttr func(optionalAttr)
-// TensorArrayV2ElementShape sets the optional element_shape attribute to value.
-// If not specified, defaults to <unknown_rank:true >
-func TensorArrayV2ElementShape(value tf.Shape) TensorArrayV2Attr {
+// MapIncompleteSizeCapacity sets the optional capacity attribute to value.
+// If not specified, defaults to 0
+//
+// REQUIRES: value >= 0
+func MapIncompleteSizeCapacity(value int64) MapIncompleteSizeAttr {
return func(m optionalAttr) {
- m["element_shape"] = value
+ m["capacity"] = value
}
}
-// TensorArrayV2DynamicSize sets the optional dynamic_size attribute to value.
-// If not specified, defaults to false
-func TensorArrayV2DynamicSize(value bool) TensorArrayV2Attr {
+// MapIncompleteSizeMemoryLimit sets the optional memory_limit attribute to value.
+// If not specified, defaults to 0
+//
+// REQUIRES: value >= 0
+func MapIncompleteSizeMemoryLimit(value int64) MapIncompleteSizeAttr {
return func(m optionalAttr) {
- m["dynamic_size"] = value
+ m["memory_limit"] = value
}
}
-// TensorArrayV2ClearAfterRead sets the optional clear_after_read attribute to value.
-// If not specified, defaults to true
-func TensorArrayV2ClearAfterRead(value bool) TensorArrayV2Attr {
+// MapIncompleteSizeContainer sets the optional container attribute to value.
+// If not specified, defaults to ""
+func MapIncompleteSizeContainer(value string) MapIncompleteSizeAttr {
return func(m optionalAttr) {
- m["clear_after_read"] = value
+ m["container"] = value
}
}
-// TensorArrayV2TensorArrayName sets the optional tensor_array_name attribute to value.
+// MapIncompleteSizeSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
-func TensorArrayV2TensorArrayName(value string) TensorArrayV2Attr {
+func MapIncompleteSizeSharedName(value string) MapIncompleteSizeAttr {
return func(m optionalAttr) {
- m["tensor_array_name"] = value
+ m["shared_name"] = value
}
}
-// Deprecated. Use TensorArrayV3
-func TensorArrayV2(scope *Scope, size tf.Output, dtype tf.DataType, optional ...TensorArrayV2Attr) (handle tf.Output) {
+// Op returns the number of incomplete elements in the underlying container.
+func MapIncompleteSize(scope *Scope, dtypes []tf.DataType, optional ...MapIncompleteSizeAttr) (size tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dtype": dtype}
+ attrs := map[string]interface{}{"dtypes": dtypes}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "TensorArrayV2",
- Input: []tf.Input{
- size,
- },
+ Type: "MapIncompleteSize",
+
Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// SerializeManySparseAttr is an optional argument to SerializeManySparse.
-type SerializeManySparseAttr func(optionalAttr)
+// OrderedMapUnstageAttr is an optional argument to OrderedMapUnstage.
+type OrderedMapUnstageAttr func(optionalAttr)
-// SerializeManySparseOutType sets the optional out_type attribute to value.
+// OrderedMapUnstageCapacity sets the optional capacity attribute to value.
+// If not specified, defaults to 0
//
-// value: The `dtype` to use for serialization; the supported types are `string`
-// (default) and `variant`.
-// If not specified, defaults to DT_STRING
-func SerializeManySparseOutType(value tf.DataType) SerializeManySparseAttr {
+// REQUIRES: value >= 0
+func OrderedMapUnstageCapacity(value int64) OrderedMapUnstageAttr {
return func(m optionalAttr) {
- m["out_type"] = value
+ m["capacity"] = value
}
}
-// Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor` object.
-//
-// The `SparseTensor` must have rank `R` greater than 1, and the first dimension
-// is treated as the minibatch dimension. Elements of the `SparseTensor`
-// must be sorted in increasing order of this first dimension. The serialized
-// `SparseTensor` objects going into each row of `serialized_sparse` will have
-// rank `R-1`.
+// OrderedMapUnstageMemoryLimit sets the optional memory_limit attribute to value.
+// If not specified, defaults to 0
//
-// The minibatch size `N` is extracted from `sparse_shape[0]`.
+// REQUIRES: value >= 0
+func OrderedMapUnstageMemoryLimit(value int64) OrderedMapUnstageAttr {
+ return func(m optionalAttr) {
+ m["memory_limit"] = value
+ }
+}
+
+// OrderedMapUnstageContainer sets the optional container attribute to value.
+// If not specified, defaults to ""
+func OrderedMapUnstageContainer(value string) OrderedMapUnstageAttr {
+ return func(m optionalAttr) {
+ m["container"] = value
+ }
+}
+
+// OrderedMapUnstageSharedName sets the optional shared_name attribute to value.
+// If not specified, defaults to ""
+func OrderedMapUnstageSharedName(value string) OrderedMapUnstageAttr {
+ return func(m optionalAttr) {
+ m["shared_name"] = value
+ }
+}
+
+// Op removes and returns the values associated with the key
//
-// Arguments:
-// sparse_indices: 2-D. The `indices` of the minibatch `SparseTensor`.
-// sparse_values: 1-D. The `values` of the minibatch `SparseTensor`.
-// sparse_shape: 1-D. The `shape` of the minibatch `SparseTensor`.
-func SerializeManySparse(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output, optional ...SerializeManySparseAttr) (serialized_sparse tf.Output) {
+// from the underlying container. If the underlying container
+// does not contain this key, the op will block until it does.
+func OrderedMapUnstage(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, optional ...OrderedMapUnstageAttr) (values []tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"dtypes": dtypes}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "SerializeManySparse",
+ Type: "OrderedMapUnstage",
Input: []tf.Input{
- sparse_indices, sparse_values, sparse_shape,
+ key, indices,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// Computes inverse hyperbolic cosine of x element-wise.
-func Acosh(scope *Scope, x tf.Output) (y tf.Output) {
if scope.Err() != nil {
return
}
- opspec := tf.OpSpec{
- Type: "Acosh",
- Input: []tf.Input{
- x,
- },
+ var idx int
+ var err error
+ if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
+ scope.UpdateErr("OrderedMapUnstage", err)
+ return
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return values
}
-// Computes the reverse mode backpropagated gradient of the Cholesky algorithm.
-//
-// For an explanation see "Differentiation of the Cholesky algorithm" by
-// Iain Murray http://arxiv.org/abs/1602.07527.
-//
-// Arguments:
-// l: Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`.
-// Algorithm depends only on lower triangular part of the innermost matrices of
-// this tensor.
-// grad: df/dl where f is some scalar function. Shape is `[..., M, M]`.
-// Algorithm depends only on lower triangular part of the innermost matrices of
-// this tensor.
+// OrderedMapSizeAttr is an optional argument to OrderedMapSize.
+type OrderedMapSizeAttr func(optionalAttr)
+
+// OrderedMapSizeCapacity sets the optional capacity attribute to value.
+// If not specified, defaults to 0
//
-// Returns Symmetrized version of df/dA . Shape is `[..., M, M]`
-func CholeskyGrad(scope *Scope, l tf.Output, grad tf.Output) (output tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "CholeskyGrad",
- Input: []tf.Input{
- l, grad,
- },
+// REQUIRES: value >= 0
+func OrderedMapSizeCapacity(value int64) OrderedMapSizeAttr {
+ return func(m optionalAttr) {
+ m["capacity"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// Computes inverse hyperbolic tangent of x element-wise.
-func Atanh(scope *Scope, x tf.Output) (y tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "Atanh",
- Input: []tf.Input{
- x,
- },
+// OrderedMapSizeMemoryLimit sets the optional memory_limit attribute to value.
+// If not specified, defaults to 0
+//
+// REQUIRES: value >= 0
+func OrderedMapSizeMemoryLimit(value int64) OrderedMapSizeAttr {
+ return func(m optionalAttr) {
+ m["memory_limit"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// Computes the log of the absolute value of `Gamma(x)` element-wise.
-func Lgamma(scope *Scope, x tf.Output) (y tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "Lgamma",
- Input: []tf.Input{
- x,
- },
+// OrderedMapSizeContainer sets the optional container attribute to value.
+// If not specified, defaults to ""
+func OrderedMapSizeContainer(value string) OrderedMapSizeAttr {
+ return func(m optionalAttr) {
+ m["container"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// Returns x / y element-wise for real types.
-//
-// If `x` and `y` are reals, this will return the floating-point division.
-//
-// *NOTE*: `Div` supports broadcasting. More about broadcasting
-// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-func RealDiv(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "RealDiv",
- Input: []tf.Input{
- x, y,
- },
+// OrderedMapSizeSharedName sets the optional shared_name attribute to value.
+// If not specified, defaults to ""
+func OrderedMapSizeSharedName(value string) OrderedMapSizeAttr {
+ return func(m optionalAttr) {
+ m["shared_name"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// Returns the number of work units this Reader has finished processing.
-//
-// Arguments:
-// reader_handle: Handle to a Reader.
-func ReaderNumWorkUnitsCompletedV2(scope *Scope, reader_handle tf.Output) (units_completed tf.Output) {
+// Op returns the number of elements in the underlying container.
+func OrderedMapSize(scope *Scope, dtypes []tf.DataType, optional ...OrderedMapSizeAttr) (size tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"dtypes": dtypes}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "ReaderNumWorkUnitsCompletedV2",
- Input: []tf.Input{
- reader_handle,
- },
+ Type: "OrderedMapSize",
+
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Conv2DBackpropFilterAttr is an optional argument to Conv2DBackpropFilter.
-type Conv2DBackpropFilterAttr func(optionalAttr)
+// CTCLossAttr is an optional argument to CTCLoss.
+type CTCLossAttr func(optionalAttr)
-// Conv2DBackpropFilterUseCudnnOnGpu sets the optional use_cudnn_on_gpu attribute to value.
-// If not specified, defaults to true
-func Conv2DBackpropFilterUseCudnnOnGpu(value bool) Conv2DBackpropFilterAttr {
+// CTCLossPreprocessCollapseRepeated sets the optional preprocess_collapse_repeated attribute to value.
+//
+// value: Scalar, if true then repeated labels are
+// collapsed prior to the CTC calculation.
+// If not specified, defaults to false
+func CTCLossPreprocessCollapseRepeated(value bool) CTCLossAttr {
return func(m optionalAttr) {
- m["use_cudnn_on_gpu"] = value
+ m["preprocess_collapse_repeated"] = value
}
}
-// Conv2DBackpropFilterDataFormat sets the optional data_format attribute to value.
+// CTCLossCtcMergeRepeated sets the optional ctc_merge_repeated attribute to value.
//
-// value: Specify the data format of the input and output data. With the
-// default format "NHWC", the data is stored in the order of:
-// [batch, in_height, in_width, in_channels].
-// Alternatively, the format could be "NCHW", the data storage order of:
-// [batch, in_channels, in_height, in_width].
-// If not specified, defaults to "NHWC"
-func Conv2DBackpropFilterDataFormat(value string) Conv2DBackpropFilterAttr {
+// value: Scalar. If set to false, *during* CTC calculation
+// repeated non-blank labels will not be merged and are interpreted as
+// individual labels. This is a simplified version of CTC.
+// If not specified, defaults to true
+func CTCLossCtcMergeRepeated(value bool) CTCLossAttr {
return func(m optionalAttr) {
- m["data_format"] = value
+ m["ctc_merge_repeated"] = value
}
}
-// Conv2DBackpropFilterDilations sets the optional dilations attribute to value.
+// CTCLossIgnoreLongerOutputsThanInputs sets the optional ignore_longer_outputs_than_inputs attribute to value.
//
-// value: 1-D tensor of length 4. The dilation factor for each dimension of
-// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
-// element on that dimension. The dimension order is determined by the value of
-// `data_format`, see above for details. Dilations in the batch and depth
-// dimensions must be 1.
-// If not specified, defaults to <i:1 i:1 i:1 i:1 >
-func Conv2DBackpropFilterDilations(value []int64) Conv2DBackpropFilterAttr {
+// value: Scalar. If set to true, during CTC
+// calculation, items that have longer output sequences than input sequences
+// are skipped: they don't contribute to the loss term and have zero-gradient.
+// If not specified, defaults to false
+func CTCLossIgnoreLongerOutputsThanInputs(value bool) CTCLossAttr {
return func(m optionalAttr) {
- m["dilations"] = value
+ m["ignore_longer_outputs_than_inputs"] = value
}
}
-// Computes the gradients of convolution with respect to the filter.
+// Calculates the CTC Loss (log probability) for each batch entry. Also calculates
+//
+// the gradient. This op performs the softmax operation for you, so inputs
+// should be, e.g., linear projections of the outputs of an LSTM.
//
// Arguments:
-// input: 4-D with shape `[batch, in_height, in_width, in_channels]`.
-// filter_sizes: An integer vector representing the tensor shape of `filter`,
-// where `filter` is a 4-D
-// `[filter_height, filter_width, in_channels, out_channels]` tensor.
-// out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`.
-// Gradients w.r.t. the output of the convolution.
-// strides: The stride of the sliding window for each dimension of the input
-// of the convolution. Must be in the same order as the dimension specified with
-// format.
-// padding: The type of padding algorithm to use.
+// inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
+// labels_indices: The indices of a `SparseTensor<int32, 2>`.
+// `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for
+// `(batch b, time t)`.
+// labels_values: The values (labels) associated with the given batch and time.
+// sequence_length: A vector containing sequence lengths (batch).
//
-// Returns 4-D with shape
-// `[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t.
-// the `filter` input of the convolution.
-func Conv2DBackpropFilter(scope *Scope, input tf.Output, filter_sizes tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv2DBackpropFilterAttr) (output tf.Output) {
+// Returns A vector (batch) containing log-probabilities, and the gradient of
+// `loss`: 3-D, shape `(max_time x batch_size x num_classes)`.
+func CTCLoss(scope *Scope, inputs tf.Output, labels_indices tf.Output, labels_values tf.Output, sequence_length tf.Output, optional ...CTCLossAttr) (loss tf.Output, gradient tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"strides": strides, "padding": padding}
+ attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "Conv2DBackpropFilter",
+ Type: "CTCLoss",
Input: []tf.Input{
- input, filter_sizes, out_backprop,
+ inputs, labels_indices, labels_values, sequence_length,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1)
}
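+
+// A shape-oriented sketch for CTCLoss (illustrative; the placeholders stand in
+// for real logits and a SparseTensor<int32, 2> of labels):
+//
+// ```
+// s := op.NewScope()
+// inputs := op.Placeholder(s, tf.Float)    // [max_time, batch_size, num_classes]
+// labelsIdx := op.Placeholder(s, tf.Int64) // SparseTensor indices, [n, 2]
+// labelsVal := op.Placeholder(s, tf.Int32) // SparseTensor values, [n]
+// seqLen := op.Placeholder(s, tf.Int32)    // [batch_size]
+// loss, grad := op.CTCLoss(s, inputs, labelsIdx, labelsVal, seqLen)
+// _, _ = loss, grad
+// ```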
-// MinAttr is an optional argument to Min.
-type MinAttr func(optionalAttr)
+// CTCGreedyDecoderAttr is an optional argument to CTCGreedyDecoder.
+type CTCGreedyDecoderAttr func(optionalAttr)
-// MinKeepDims sets the optional keep_dims attribute to value.
+// CTCGreedyDecoderMergeRepeated sets the optional merge_repeated attribute to value.
//
-// value: If true, retain reduced dimensions with length 1.
+// value: If True, merge repeated classes in output.
// If not specified, defaults to false
-func MinKeepDims(value bool) MinAttr {
+func CTCGreedyDecoderMergeRepeated(value bool) CTCGreedyDecoderAttr {
return func(m optionalAttr) {
- m["keep_dims"] = value
+ m["merge_repeated"] = value
}
}
-// Computes the minimum of elements across dimensions of a tensor.
+// Performs greedy decoding on the logits given in inputs.
//
-// Reduces `input` along the dimensions given in `axis`. Unless
-// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
-// `axis`. If `keep_dims` is true, the reduced dimensions are
-// retained with length 1.
+// A note about the attribute merge_repeated: if enabled, when
+// consecutive logits' maximum indices are the same, only the first of
+// these is emitted. Labeling the blank '*', the sequence "A B B * B B"
+// becomes "A B B" if merge_repeated = True and "A B B B B" if
+// merge_repeated = False.
+//
+// Regardless of the value of merge_repeated, if the maximum index of a given
+// time and batch corresponds to the blank, index `(num_classes - 1)`, no new
+// element is emitted.
//
// Arguments:
-// input: The tensor to reduce.
-// axis: The dimensions to reduce. Must be in the range
-// `[-rank(input), rank(input))`.
+// inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
+// sequence_length: A vector containing sequence lengths, size `(batch_size)`.
//
-// Returns The reduced tensor.
-func Min(scope *Scope, input tf.Output, axis tf.Output, optional ...MinAttr) (output tf.Output) {
+// Returns Indices matrix, size `(total_decoded_outputs x 2)`,
+// of a `SparseTensor<int64, 2>`. The rows store: [batch, time].
+// Values vector, size `(total_decoded_outputs)`,
+// of a `SparseTensor<int64, 2>`. The vector stores the decoded classes.
+// Shape vector, size `(2)`, of the decoded SparseTensor.
+// Values are: `[batch_size, max_decoded_length]`.
+// Matrix, size `(batch_size x 1)`, containing sequence log-probabilities.
+func CTCGreedyDecoder(scope *Scope, inputs tf.Output, sequence_length tf.Output, optional ...CTCGreedyDecoderAttr) (decoded_indices tf.Output, decoded_values tf.Output, decoded_shape tf.Output, log_probability tf.Output) {
if scope.Err() != nil {
return
}
@@ -25336,215 +24697,380 @@ func Min(scope *Scope, input tf.Output, axis tf.Output, optional ...MinAttr) (ou
a(attrs)
}
opspec := tf.OpSpec{
- Type: "Min",
+ Type: "CTCGreedyDecoder",
Input: []tf.Input{
- input, axis,
+ inputs, sequence_length,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
}
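+
+// A usage sketch for CTCGreedyDecoder (illustrative; `inputs` and `seqLen`
+// as in the CTCLoss sketch above):
+//
+// ```
+// idx, vals, shape, logProb := op.CTCGreedyDecoder(s, inputs, seqLen,
+// 	op.CTCGreedyDecoderMergeRepeated(true))
+// _, _, _, _ = idx, vals, shape, logProb
+// ```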
-// Computes Psi, the derivative of Lgamma (the log of the absolute value of
+// Forwards `data` to the output port determined by `pred`.
//
-// `Gamma(x)`), element-wise.
-func Digamma(scope *Scope, x tf.Output) (y tf.Output) {
+// If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise,
+// the data goes to `output_false`.
+//
+// See also `RefSwitch` and `Merge`.
+//
+// Arguments:
+// data: The tensor to be forwarded to the appropriate output.
+// pred: A scalar that specifies which output port will receive data.
+//
+// Returns If `pred` is false, data will be forwarded to the first output;
+// if `pred` is true, data will be forwarded to the second output.
+func Switch(scope *Scope, data tf.Output, pred tf.Output) (output_false tf.Output, output_true tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Digamma",
+ Type: "Switch",
Input: []tf.Input{
- x,
+ data, pred,
},
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1)
}
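+
+// A control-flow sketch combining Switch with the Merge op defined earlier in
+// this file (illustrative):
+//
+// ```
+// s := op.NewScope()
+// pred := op.Const(s, true)
+// x := op.Const(s, float32(1))
+// f, t := op.Switch(s, x, pred)              // pred=true forwards x to `t`
+// out, idx := op.Merge(s, []tf.Output{f, t}) // forwards whichever is available
+// _, _ = out, idx
+// ```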
-// Gather slices from `params` axis `axis` according to `indices`.
-//
-// `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
-// Produces an output tensor with shape `params.shape[:axis] + indices.shape +
-// params.shape[axis + 1:]` where:
-//
-// ```python
-// # Scalar indices (output is rank(params) - 1).
-// output[a_0, ..., a_n, b_0, ..., b_n] =
-// params[a_0, ..., a_n, indices, b_0, ..., b_n]
-//
-// # Vector indices (output is rank(params)).
-// output[a_0, ..., a_n, i, b_0, ..., b_n] =
-// params[a_0, ..., a_n, indices[i], b_0, ..., b_n]
-//
-// # Higher rank indices (output is rank(params) + rank(indices) - 1).
-// output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =
-// params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
-// ```
-//
-// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
-// <img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt>
-// </div>
+// Add all input tensors element-wise.
//
// Arguments:
-// params: The tensor from which to gather values. Must be at least rank
-// `axis + 1`.
-// indices: Index tensor. Must be in range `[0, params.shape[axis])`.
-// axis: The axis in `params` to gather `indices` from. Defaults to the first
-// dimension. Supports negative indexes.
-//
-// Returns Values from `params` gathered from indices given by `indices`, with
-// shape `params.shape[:axis] + indices.shape + params.shape[axis + 1:]`.
-func GatherV2(scope *Scope, params tf.Output, indices tf.Output, axis tf.Output) (output tf.Output) {
+// inputs: Must all be the same size and shape.
+func AddN(scope *Scope, inputs []tf.Output) (sum tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "GatherV2",
+ Type: "AddN",
Input: []tf.Input{
- params, indices, axis,
+ tf.OutputList(inputs),
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
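+
+// A usage sketch for AddN (illustrative):
+//
+// ```
+// s := op.NewScope()
+// a := op.Const(s, []float32{1, 2})
+// b := op.Const(s, []float32{3, 4})
+// sum := op.AddN(s, []tf.Output{a, b}) // => [4, 6]
+// ```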
-// Computes the complementary error function of `x` element-wise.
-func Erfc(scope *Scope, x tf.Output) (y tf.Output) {
+// EnterAttr is an optional argument to Enter.
+type EnterAttr func(optionalAttr)
+
+// EnterIsConstant sets the optional is_constant attribute to value.
+//
+// value: If true, the output is constant within the child frame.
+// If not specified, defaults to false
+func EnterIsConstant(value bool) EnterAttr {
+ return func(m optionalAttr) {
+ m["is_constant"] = value
+ }
+}
+
+// EnterParallelIterations sets the optional parallel_iterations attribute to value.
+//
+// value: The number of iterations allowed to run in parallel.
+// If not specified, defaults to 10
+func EnterParallelIterations(value int64) EnterAttr {
+ return func(m optionalAttr) {
+ m["parallel_iterations"] = value
+ }
+}
+
+// Creates or finds a child frame, and makes `data` available to the child frame.
+//
+// This op is used together with `Exit` to create loops in the graph.
+// The unique `frame_name` is used by the `Executor` to identify frames. If
+// `is_constant` is true, `output` is a constant in the child frame; otherwise
+// it may be changed in the child frame. At most `parallel_iterations` iterations
+// are run in parallel in the child frame.
+//
+// Arguments:
+// data: The tensor to be made available to the child frame.
+// frame_name: The name of the child frame.
+//
+// Returns The same tensor as `data`.
+func Enter(scope *Scope, data tf.Output, frame_name string, optional ...EnterAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"frame_name": frame_name}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "Erfc",
+ Type: "Enter",
Input: []tf.Input{
- x,
+ data,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Computes sin of x element-wise.
-func Sin(scope *Scope, x tf.Output) (y tf.Output) {
+// Produce a string tensor that encodes the state of a Reader.
+//
+// Not all Readers support being serialized, so this can produce an
+// Unimplemented error.
+//
+// Arguments:
+// reader_handle: Handle to a Reader.
+func ReaderSerializeStateV2(scope *Scope, reader_handle tf.Output) (state tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Sin",
+ Type: "ReaderSerializeStateV2",
Input: []tf.Input{
- x,
+ reader_handle,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Computes the determinant of one or more square matrices.
+// Exits the current frame to its parent frame.
//
-// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
-// form square matrices. The output is a tensor containing the determinants
-// for all input submatrices `[..., :, :]`.
+// Exit makes its input `data` available to the parent frame.
//
// Arguments:
-// input: Shape is `[..., M, M]`.
+// data: The tensor to be made available to the parent frame.
//
-// Returns Shape is `[...]`.
-func MatrixDeterminant(scope *Scope, input tf.Output) (output tf.Output) {
+// Returns The same tensor as `data`.
+func Exit(scope *Scope, data tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "MatrixDeterminant",
+ Type: "Exit",
Input: []tf.Input{
- input,
+ data,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
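+
+// A skeleton showing how Enter and Exit bracket a loop frame (illustrative;
+// a complete while-loop also needs Merge, Switch and NextIteration between
+// these two calls):
+//
+// ```
+// s := op.NewScope()
+// x := op.Const(s, float32(0))
+// inFrame := op.Enter(s, x, "while/frame", op.EnterParallelIterations(10))
+// // ... loop body built from Merge/Switch/NextIteration goes here ...
+// out := op.Exit(s, inFrame)
+// _ = out
+// ```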
-// Computes cos of x element-wise.
-func Cos(scope *Scope, x tf.Output) (y tf.Output) {
+// Returns a copy of the input tensor.
+func Snapshot(scope *Scope, input tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Cos",
+ Type: "Snapshot",
Input: []tf.Input{
- x,
+ input,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Convert the quantized 'input' tensor into a lower-precision 'output', using the
+// Scatter `updates` into a new (initially zero) tensor according to `indices`.
//
-// output range specified with 'requested_output_min' and 'requested_output_max'.
+// Creates a new tensor by applying sparse `updates` to individual
+// values or slices within a zero tensor of the given `shape` according to
+// indices. This operator is the inverse of the @{tf.gather_nd} operator which
+// extracts values or slices from a given tensor.
//
-// [input_min, input_max] are scalar floats that specify the range for the float
-// interpretation of the 'input' data. For example, if input_min is -1.0f and
-// input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0
-// value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.
+// **WARNING**: The order in which updates are applied is nondeterministic, so the
+// output will be nondeterministic if `indices` contains duplicates.
//
-// Arguments:
+// `indices` is an integer tensor containing indices into a new tensor of shape
+// `shape`. The last dimension of `indices` can be at most the rank of `shape`:
//
-// input_min: The float value that the minimum quantized input value represents.
-// input_max: The float value that the maximum quantized input value represents.
-// requested_output_min: The float value that the minimum quantized output value represents.
-// requested_output_max: The float value that the maximum quantized output value represents.
-// out_type: The type of the output. Should be a lower bit depth than Tinput.
+// indices.shape[-1] <= shape.rank
//
-// Returns The requested_output_min value is copied into this output.The requested_output_max value is copied into this output.
-func Requantize(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, requested_output_min tf.Output, requested_output_max tf.Output, out_type tf.DataType) (output tf.Output, output_min tf.Output, output_max tf.Output) {
+// The last dimension of `indices` corresponds to indices into elements
+// (if `indices.shape[-1] = shape.rank`) or slices
+// (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of
+// `shape`. `updates` is a tensor with shape
+//
+// indices.shape[:-1] + shape[indices.shape[-1]:]
+//
+// The simplest form of scatter is to insert individual elements in a tensor by
+// index. For example, say we want to insert 4 scattered elements in a rank-1
+// tensor with 8 elements.
+//
+// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+// <img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd1.png" alt>
+// </div>
+//
+// In Python, this scatter operation would look like this:
+//
+// ```python
+// indices = tf.constant([[4], [3], [1], [7]])
+// updates = tf.constant([9, 10, 11, 12])
+// shape = tf.constant([8])
+// scatter = tf.scatter_nd(indices, updates, shape)
+// with tf.Session() as sess:
+// print(sess.run(scatter))
+// ```
+//
+// The resulting tensor would look like this:
+//
+// [0, 11, 0, 10, 9, 0, 0, 12]
+//
+// We can also insert entire slices of a higher rank tensor all at once. For
+// example, we can insert two slices in the first dimension of a
+// rank-3 tensor with two matrices of new values.
+//
+// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+// <img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd2.png" alt>
+// </div>
+//
+// In Python, this scatter operation would look like this:
+//
+// ```python
+// indices = tf.constant([[0], [2]])
+// updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
+// [7, 7, 7, 7], [8, 8, 8, 8]],
+// [[5, 5, 5, 5], [6, 6, 6, 6],
+// [7, 7, 7, 7], [8, 8, 8, 8]]])
+// shape = tf.constant([4, 4, 4])
+// scatter = tf.scatter_nd(indices, updates, shape)
+// with tf.Session() as sess:
+// print(sess.run(scatter))
+// ```
+//
+// The resulting tensor would look like this:
+//
+// [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
+// [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
+// [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
+// [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]
+//
+// Arguments:
+// indices: Index tensor.
+// updates: Updates to scatter into output.
+// shape: 1-D. The shape of the resulting tensor.
+//
+// Returns A new tensor with the given shape and updates applied according
+// to the indices.
+func ScatterNd(scope *Scope, indices tf.Output, updates tf.Output, shape tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"out_type": out_type}
opspec := tf.OpSpec{
- Type: "Requantize",
+ Type: "ScatterNd",
Input: []tf.Input{
- input, input_min, input_max, requested_output_min, requested_output_max,
+ indices, updates, shape,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
+ return op.Output(0)
}
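+
+// The first Python example above, transcribed as a Go graph-construction
+// sketch (illustrative):
+//
+// ```
+// s := op.NewScope()
+// indices := op.Const(s, [][]int32{{4}, {3}, {1}, {7}})
+// updates := op.Const(s, []int32{9, 10, 11, 12})
+// shape := op.Const(s, []int32{8})
+// scatter := op.ScatterNd(s, indices, updates, shape)
+// // => [0, 11, 0, 10, 9, 0, 0, 12]
+// ```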
-// ArgMinAttr is an optional argument to ArgMin.
-type ArgMinAttr func(optionalAttr)
+// SpaceToDepthAttr is an optional argument to SpaceToDepth.
+type SpaceToDepthAttr func(optionalAttr)
-// ArgMinOutputType sets the optional output_type attribute to value.
-// If not specified, defaults to DT_INT64
-func ArgMinOutputType(value tf.DataType) ArgMinAttr {
+// SpaceToDepthDataFormat sets the optional data_format attribute to value.
+// If not specified, defaults to "NHWC"
+func SpaceToDepthDataFormat(value string) SpaceToDepthAttr {
return func(m optionalAttr) {
- m["output_type"] = value
+ m["data_format"] = value
}
}
-// Returns the index with the smallest value across dimensions of a tensor.
+// SpaceToDepth for tensors of type T.
//
-// Note that in case of ties the identity of the return value is not guaranteed.
+// Rearranges blocks of spatial data into depth. More specifically,
+// this op outputs a copy of the input tensor where values from the `height`
+// and `width` dimensions are moved to the `depth` dimension.
+// The attr `block_size` indicates the input block size.
+//
+// * Non-overlapping blocks of size `block_size x block_size` are rearranged
+// into depth at each location.
+// * The depth of the output tensor is `block_size * block_size * input_depth`.
+// * The Y, X coordinates within each block of the input become the high order
+// component of the output channel index.
+// * The input tensor's height and width must be divisible by block_size.
+//
+// The `data_format` attr specifies the layout of the input and output tensors
+// with the following options:
+// "NHWC": `[ batch, height, width, channels ]`
+// "NCHW": `[ batch, channels, height, width ]`
+// "NCHW_VECT_C":
+// `qint8 [ batch, channels / 4, height, width, 4 ]`
+//
+// It is useful to consider the operation as transforming a 6-D Tensor.
+// For example, with data_format = NHWC,
+//  each element in the input tensor can be specified via 6 coordinates,
+// ordered by decreasing memory layout significance as:
+// n,oY,bY,oX,bX,iC (where n=batch index, oX, oY means X or Y coordinates
+// within the output image, bX, bY means coordinates
+// within the input block, iC means input channels).
+// The output would be a transpose to the following layout:
+// n,oY,oX,bY,bX,iC
+//
+// This operation is useful for resizing the activations between convolutions
+// (but keeping all data), e.g. instead of pooling. It is also useful for training
+// purely convolutional models.
+//
+// For example, given an input of shape `[1, 2, 2, 1]`, data_format = "NHWC" and
+// block_size = 2:
+//
+// ```
+// x = [[[[1], [2]],
+// [[3], [4]]]]
+// ```
+//
+// This operation will output a tensor of shape `[1, 1, 1, 4]`:
+//
+// ```
+// [[[[1, 2, 3, 4]]]]
+// ```
+//
+// Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`;
+// the corresponding output will have a single element (i.e. width and height are
+// both 1) and a depth of 4 channels (1 * block_size * block_size).
+// The output element shape is `[1, 1, 4]`.
+//
+// For an input tensor with larger depth, e.g. of shape `[1, 2, 2, 3]`:
+//
+// ```
+// x = [[[[1, 2, 3], [4, 5, 6]],
+// [[7, 8, 9], [10, 11, 12]]]]
+// ```
+//
+// This operation, for a block_size of 2, will return the following tensor of
+// shape `[1, 1, 1, 12]`:
+//
+// ```
+// [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
+// ```
+//
+// Similarly, for the following input of shape `[1, 4, 4, 1]`, and a block size of 2:
+//
+// ```
+// x = [[[[1], [2], [5], [6]],
+// [[3], [4], [7], [8]],
+// [[9], [10], [13], [14]],
+// [[11], [12], [15], [16]]]]
+// ```
+//
+// the operator will return the following tensor of shape `[1, 2, 2, 4]`:
+//
+// ```
+// [[[[1, 2, 3, 4],
+//    [5, 6, 7, 8]],
+//   [[9, 10, 11, 12],
+//    [13, 14, 15, 16]]]]
+// ```
//
// Arguments:
//
-// dimension: int32 or int64, must be in the range `[-rank(input), rank(input))`.
-// Describes which dimension of the input Tensor to reduce across. For vectors,
-// use dimension = 0.
-func ArgMin(scope *Scope, input tf.Output, dimension tf.Output, optional ...ArgMinAttr) (output tf.Output) {
+// block_size: The size of the spatial block.
+func SpaceToDepth(scope *Scope, input tf.Output, block_size int64, optional ...SpaceToDepthAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"block_size": block_size}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "ArgMin",
+ Type: "SpaceToDepth",
Input: []tf.Input{
- input, dimension,
+ input,
},
Attrs: attrs,
}
@@ -25552,80 +25078,36 @@ func ArgMin(scope *Scope, input tf.Output, dimension tf.Output, optional ...ArgM
return op.Output(0)
}
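+
+// The following is a hand-written usage sketch, not part of the generated
+// wrappers: it shows how a caller might wire up SpaceToDepth, assuming the
+// tensorflow/go package is imported as `tf` and this package as `op`; the
+// identifiers are invented for illustration.
+//
+// ```
+// import (
+//     tf "github.com/tensorflow/tensorflow/tensorflow/go"
+//     "github.com/tensorflow/tensorflow/tensorflow/go/op"
+// )
+//
+// func spaceToDepthSketch() tf.Output {
+//     s := op.NewScope()
+//     // Input of shape [1, 2, 2, 1]; with block_size = 2 the output has
+//     // shape [1, 1, 1, 4], i.e. [[[[1, 2, 3, 4]]]].
+//     x := op.Const(s, [][][][]float32{{{{1}, {2}}, {{3}, {4}}}})
+//     return op.SpaceToDepth(s, x, 2, op.SpaceToDepthDataFormat("NHWC"))
+// }
+// ```
+// The later sketches in this file reuse the same imports and conventions.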
-// Computes atan of x element-wise.
-func Atan(scope *Scope, x tf.Output) (y tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "Atan",
- Input: []tf.Input{
- x,
- },
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// MfccAttr is an optional argument to Mfcc.
-type MfccAttr func(optionalAttr)
-
-// MfccUpperFrequencyLimit sets the optional upper_frequency_limit attribute to value.
-//
-// value: The highest frequency to use when calculating the
-// ceptstrum.
-// If not specified, defaults to 4000
-func MfccUpperFrequencyLimit(value float32) MfccAttr {
- return func(m optionalAttr) {
- m["upper_frequency_limit"] = value
- }
-}
+// AbortAttr is an optional argument to Abort.
+type AbortAttr func(optionalAttr)
-// MfccLowerFrequencyLimit sets the optional lower_frequency_limit attribute to value.
+// AbortErrorMsg sets the optional error_msg attribute to value.
//
-// value: The lowest frequency to use when calculating the
-// ceptstrum.
-// If not specified, defaults to 20
-func MfccLowerFrequencyLimit(value float32) MfccAttr {
+// value: A string which is the message associated with the exception.
+// If not specified, defaults to ""
+func AbortErrorMsg(value string) AbortAttr {
return func(m optionalAttr) {
- m["lower_frequency_limit"] = value
+ m["error_msg"] = value
}
}
-// MfccFilterbankChannelCount sets the optional filterbank_channel_count attribute to value.
-//
-// value: Resolution of the Mel bank used internally.
-// If not specified, defaults to 40
-func MfccFilterbankChannelCount(value int64) MfccAttr {
+// AbortExitWithoutError sets the optional exit_without_error attribute to value.
+// If not specified, defaults to false
+func AbortExitWithoutError(value bool) AbortAttr {
return func(m optionalAttr) {
- m["filterbank_channel_count"] = value
+ m["exit_without_error"] = value
}
}
-// MfccDctCoefficientCount sets the optional dct_coefficient_count attribute to value.
+// Raises an exception to abort the process when called.
//
-// value: How many output channels to produce per time slice.
-// If not specified, defaults to 13
-func MfccDctCoefficientCount(value int64) MfccAttr {
- return func(m optionalAttr) {
- m["dct_coefficient_count"] = value
- }
-}
-
-// Transforms a spectrogram into a form that's useful for speech recognition.
+// If exit_without_error is true, the process will exit normally;
+// otherwise it will exit with a SIGABRT signal.
//
-// Mel Frequency Cepstral Coefficients are a way of representing audio data that's
-// been effective as an input feature for machine learning. They are created by
-// taking the spectrum of a spectrogram (a 'cepstrum'), and discarding some of the
-// higher frequencies that are less significant to the human ear. They have a long
-// history in the speech recognition world, and https://en.wikipedia.org/wiki/Mel-frequency_cepstrum
-// is a good resource to learn more.
+// Returns nothing but an exception.
//
-// Arguments:
-// spectrogram: Typically produced by the Spectrogram op, with magnitude_squared
-// set to true.
-// sample_rate: How many samples per second the source audio used.
-func Mfcc(scope *Scope, spectrogram tf.Output, sample_rate tf.Output, optional ...MfccAttr) (output tf.Output) {
+// Returns the created operation.
+func Abort(scope *Scope, optional ...AbortAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
@@ -25634,53 +25116,79 @@ func Mfcc(scope *Scope, spectrogram tf.Output, sample_rate tf.Output, optional .
a(attrs)
}
opspec := tf.OpSpec{
- Type: "Mfcc",
- Input: []tf.Input{
- spectrogram, sample_rate,
- },
+ Type: "Abort",
+
Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
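+
+// Illustrative sketch of the functional-options pattern the generated Attr
+// types follow (invented identifiers; same imports as the SpaceToDepth
+// sketch above):
+//
+// ```
+// s := op.NewScope()
+// // Configure the op through its optional attributes; the returned
+// // *tf.Operation would be passed as a target to (*tf.Session).Run.
+// abort := op.Abort(s, op.AbortErrorMsg("shutting down"),
+//     op.AbortExitWithoutError(true))
+// _ = abort
+// ```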
-// QuantizedAddAttr is an optional argument to QuantizedAdd.
-type QuantizedAddAttr func(optionalAttr)
+// UniformCandidateSamplerAttr is an optional argument to UniformCandidateSampler.
+type UniformCandidateSamplerAttr func(optionalAttr)
-// QuantizedAddToutput sets the optional Toutput attribute to value.
-// If not specified, defaults to DT_QINT32
-func QuantizedAddToutput(value tf.DataType) QuantizedAddAttr {
+// UniformCandidateSamplerSeed sets the optional seed attribute to value.
+//
+// value: If either seed or seed2 are set to be non-zero, the random number
+// generator is seeded by the given seed. Otherwise, it is seeded by a
+// random seed.
+// If not specified, defaults to 0
+func UniformCandidateSamplerSeed(value int64) UniformCandidateSamplerAttr {
return func(m optionalAttr) {
- m["Toutput"] = value
+ m["seed"] = value
}
}
-// Returns x + y element-wise, working on quantized buffers.
+// UniformCandidateSamplerSeed2 sets the optional seed2 attribute to value.
//
-// Arguments:
+// value: A second seed to avoid seed collision.
+// If not specified, defaults to 0
+func UniformCandidateSamplerSeed2(value int64) UniformCandidateSamplerAttr {
+ return func(m optionalAttr) {
+ m["seed2"] = value
+ }
+}
+
+// Generates labels for candidate sampling with a uniform distribution.
//
+// See explanations of candidate sampling and the data formats at
+// go/candidate-sampling.
//
-// min_x: The float value that the lowest quantized `x` value represents.
-// max_x: The float value that the highest quantized `x` value represents.
-// min_y: The float value that the lowest quantized `y` value represents.
-// max_y: The float value that the highest quantized `y` value represents.
+// For each batch, this op picks a single set of sampled candidate labels.
//
-// Returns The float value that the lowest quantized output value represents.The float value that the highest quantized output value represents.
+// The advantages of sampling candidates per-batch are simplicity and the
+// possibility of efficient dense matrix multiplication. The disadvantage is that
+// the sampled candidates must be chosen independently of the context and of the
+// true labels.
//
-// *NOTE*: `QuantizedAdd` supports limited forms of broadcasting. More about
-// broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-func QuantizedAdd(scope *Scope, x tf.Output, y tf.Output, min_x tf.Output, max_x tf.Output, min_y tf.Output, max_y tf.Output, optional ...QuantizedAddAttr) (z tf.Output, min_z tf.Output, max_z tf.Output) {
+// Arguments:
+// true_classes: A batch_size * num_true matrix, in which each row contains the
+// IDs of the num_true target_classes in the corresponding original label.
+// num_true: Number of true labels per context.
+// num_sampled: Number of candidates to randomly sample.
+// unique: If unique is true, we sample with rejection, so that all sampled
+// candidates in a batch are unique. This requires some approximation to
+// estimate the post-rejection sampling probabilities.
+// range_max: The sampler will sample integers from the interval [0, range_max).
+//
+// Returns:
+// sampled_candidates: A vector of length num_sampled, in which each element is
+// the ID of a sampled candidate.
+// true_expected_count: A batch_size * num_true matrix, representing
+// the number of times each candidate is expected to occur in a batch
+// of sampled candidates. If unique=true, then this is a probability.
+// sampled_expected_count: A vector of length num_sampled, for each sampled
+// candidate representing the number of times the candidate is expected
+// to occur in a batch of sampled candidates. If unique=true, then this is a
+// probability.
+func UniformCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...UniformCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique, "range_max": range_max}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "QuantizedAdd",
+ Type: "UniformCandidateSampler",
Input: []tf.Input{
- x, y, min_x, max_x, min_y, max_y,
+ true_classes,
},
Attrs: attrs,
}
@@ -25688,106 +25196,175 @@ func QuantizedAdd(scope *Scope, x tf.Output, y tf.Output, min_x tf.Output, max_x
return op.Output(0), op.Output(1), op.Output(2)
}
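+
+// A hypothetical caller-side sketch (invented identifiers and values; same
+// imports as the SpaceToDepth sketch):
+//
+// ```
+// s := op.NewScope()
+// // batch_size = 2, num_true = 1: one true class ID per batch element.
+// trueClasses := op.Const(s, [][]int64{{3}, {7}})
+// sampled, trueExpected, sampledExpected := op.UniformCandidateSampler(
+//     s, trueClasses, 1, 4, true, 10,
+//     op.UniformCandidateSamplerSeed(17))
+// ```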
-// Returns an element-wise indication of the sign of a number.
+// FixedUnigramCandidateSamplerAttr is an optional argument to FixedUnigramCandidateSampler.
+type FixedUnigramCandidateSamplerAttr func(optionalAttr)
+
+// FixedUnigramCandidateSamplerVocabFile sets the optional vocab_file attribute to value.
//
-// `y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.
+// value: Each valid line in this file (which should have a CSV-like format)
+// corresponds to a valid word ID. IDs are in sequential order, starting from
+// num_reserved_ids. The last entry in each line is expected to be a value
+// corresponding to the count or relative probability. Exactly one of vocab_file
+// and unigrams needs to be passed to this op.
+// If not specified, defaults to ""
+func FixedUnigramCandidateSamplerVocabFile(value string) FixedUnigramCandidateSamplerAttr {
+ return func(m optionalAttr) {
+ m["vocab_file"] = value
+ }
+}
+
+// FixedUnigramCandidateSamplerDistortion sets the optional distortion attribute to value.
//
-// For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.
-func Sign(scope *Scope, x tf.Output) (y tf.Output) {
- if scope.Err() != nil {
- return
+// value: The distortion is used to skew the unigram probability distribution.
+// Each weight is first raised to the distortion's power before being added to
+// the internal unigram distribution. As a result, distortion = 1.0 gives regular
+// unigram sampling (as defined by the vocab file), and distortion = 0.0 gives
+// a uniform distribution.
+// If not specified, defaults to 1
+func FixedUnigramCandidateSamplerDistortion(value float32) FixedUnigramCandidateSamplerAttr {
+ return func(m optionalAttr) {
+ m["distortion"] = value
}
- opspec := tf.OpSpec{
- Type: "Sign",
- Input: []tf.Input{
- x,
- },
+}
+
+// FixedUnigramCandidateSamplerNumReservedIds sets the optional num_reserved_ids attribute to value.
+//
+// value: Optionally, some reserved IDs can be added in the range
+// [0, num_reserved_ids) by the users. One use case is that a special unknown
+// word token is used as ID 0. These IDs will have a sampling probability of 0.
+// If not specified, defaults to 0
+func FixedUnigramCandidateSamplerNumReservedIds(value int64) FixedUnigramCandidateSamplerAttr {
+ return func(m optionalAttr) {
+ m["num_reserved_ids"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// Returns element-wise smallest integer in not less than x.
-func Ceil(scope *Scope, x tf.Output) (y tf.Output) {
- if scope.Err() != nil {
- return
+// FixedUnigramCandidateSamplerNumShards sets the optional num_shards attribute to value.
+//
+// value: A sampler can be used to sample from a subset of the original range
+// in order to speed up the whole computation through parallelism. This parameter
+// (together with 'shard') indicates the number of partitions that are being
+// used in the overall computation.
+// If not specified, defaults to 1
+//
+// REQUIRES: value >= 1
+func FixedUnigramCandidateSamplerNumShards(value int64) FixedUnigramCandidateSamplerAttr {
+ return func(m optionalAttr) {
+ m["num_shards"] = value
}
- opspec := tf.OpSpec{
- Type: "Ceil",
- Input: []tf.Input{
- x,
- },
+}
+
+// FixedUnigramCandidateSamplerShard sets the optional shard attribute to value.
+//
+// value: A sampler can be used to sample from a subset of the original range
+// in order to speed up the whole computation through parallelism. This parameter
+// (together with 'num_shards') indicates the particular partition number of a
+// sampler op, when partitioning is being used.
+// If not specified, defaults to 0
+//
+// REQUIRES: value >= 0
+func FixedUnigramCandidateSamplerShard(value int64) FixedUnigramCandidateSamplerAttr {
+ return func(m optionalAttr) {
+ m["shard"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// Computes exponential of x element-wise. \\(y = e^x\\).
-func Exp(scope *Scope, x tf.Output) (y tf.Output) {
- if scope.Err() != nil {
- return
+// FixedUnigramCandidateSamplerUnigrams sets the optional unigrams attribute to value.
+//
+// value: A list of unigram counts or probabilities, one per ID in sequential
+// order. Exactly one of vocab_file and unigrams should be passed to this op.
+// If not specified, defaults to <>
+func FixedUnigramCandidateSamplerUnigrams(value []float32) FixedUnigramCandidateSamplerAttr {
+ return func(m optionalAttr) {
+ m["unigrams"] = value
}
- opspec := tf.OpSpec{
- Type: "Exp",
- Input: []tf.Input{
- x,
- },
+}
+
+// FixedUnigramCandidateSamplerSeed sets the optional seed attribute to value.
+//
+// value: If either seed or seed2 are set to be non-zero, the random number
+// generator is seeded by the given seed. Otherwise, it is seeded by a
+// random seed.
+// If not specified, defaults to 0
+func FixedUnigramCandidateSamplerSeed(value int64) FixedUnigramCandidateSamplerAttr {
+ return func(m optionalAttr) {
+ m["seed"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// Computes the Max along segments of a tensor.
+// FixedUnigramCandidateSamplerSeed2 sets the optional seed2 attribute to value.
//
-// Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
-// segments.
+// value: A second seed to avoid seed collision.
+// If not specified, defaults to 0
+func FixedUnigramCandidateSamplerSeed2(value int64) FixedUnigramCandidateSamplerAttr {
+ return func(m optionalAttr) {
+ m["seed2"] = value
+ }
+}
+
+// Generates labels for candidate sampling with a fixed unigram distribution.
//
-// This operator is similar to the [unsorted segment sum operator](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
-// Instead of computing the sum over segments, it computes the maximum
-// such that:
+// A unigram sampler could use a fixed unigram distribution read from a
+// file or passed in as an in-memory array instead of building up the distribution
+// from data on the fly. There is also an option to skew the distribution by
+// applying a distortion power to the weights.
//
-// \\(output_i = \max_j data_j\\) where max is over `j` such
-// that `segment_ids[j] == i`.
+// The vocabulary file should be in CSV-like format, with the last field
+// being the weight associated with the word.
//
-// If the maximum is empty for a given segment ID `i`, it outputs the smallest possible value for specific numeric type,
-// `output[i] = numeric_limits<T>::min()`.
+// For each batch, this op picks a single set of sampled candidate labels.
//
-// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
-// <img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentMax.png" alt>
-// </div>
+// The advantages of sampling candidates per-batch are simplicity and the
+// possibility of efficient dense matrix multiplication. The disadvantage is that
+// the sampled candidates must be chosen independently of the context and of the
+// true labels.
//
// Arguments:
+// true_classes: A batch_size * num_true matrix, in which each row contains the
+// IDs of the num_true target_classes in the corresponding original label.
+// num_true: Number of true labels per context.
+// num_sampled: Number of candidates to randomly sample.
+// unique: If unique is true, we sample with rejection, so that all sampled
+// candidates in a batch are unique. This requires some approximation to
+// estimate the post-rejection sampling probabilities.
+// range_max: The sampler will sample integers from the interval [0, range_max).
//
-// segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s
-// first dimension.
-//
-//
-// Returns Has same shape as data, except for dimension 0 which
-// has size `num_segments`.
-func UnsortedSegmentMax(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
+// Returns:
+// sampled_candidates: A vector of length num_sampled, in which each element is
+// the ID of a sampled candidate.
+// true_expected_count: A batch_size * num_true matrix, representing
+// the number of times each candidate is expected to occur in a batch
+// of sampled candidates. If unique=true, then this is a probability.
+// sampled_expected_count: A vector of length num_sampled, for each sampled
+// candidate representing the number of times the candidate is expected
+// to occur in a batch of sampled candidates. If unique=true, then this is a
+// probability.
+func FixedUnigramCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...FixedUnigramCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique, "range_max": range_max}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "UnsortedSegmentMax",
+ Type: "FixedUnigramCandidateSampler",
Input: []tf.Input{
- data, segment_ids, num_segments,
+ true_classes,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1), op.Output(2)
}
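+
+// Sketch of passing the distribution in memory via the unigrams attribute
+// (invented identifiers and counts; exactly one of vocab_file and unigrams
+// may be set):
+//
+// ```
+// s := op.NewScope()
+// trueClasses := op.Const(s, [][]int64{{0}, {2}})
+// sampled, _, _ := op.FixedUnigramCandidateSampler(
+//     s, trueClasses, 1, 2, true, 4,
+//     op.FixedUnigramCandidateSamplerUnigrams([]float32{10, 5, 4, 1}),
+//     op.FixedUnigramCandidateSamplerDistortion(0.75))
+// ```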
-// Returns x + y element-wise.
+// Elementwise computes the bitwise AND of `x` and `y`.
//
-// *NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting
-// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-func Add(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
+// The result will have those bits set that are set in both `x` and `y`. The
+// computation is performed on the underlying representations of `x` and `y`.
+func BitwiseAnd(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Add",
+ Type: "BitwiseAnd",
Input: []tf.Input{
x, y,
},
@@ -25796,16 +25373,16 @@ func Add(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
return op.Output(0)
}
-// Returns x + y element-wise.
+// Elementwise computes the bitwise left-shift of `x` and `y`.
//
-// *NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting
-// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-func AddV2(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
+// If `y` is negative, or greater than or equal to the width of `x` in bits,
+// the result is implementation defined.
+func LeftShift(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "AddV2",
+ Type: "LeftShift",
Input: []tf.Input{
x, y,
},
@@ -25814,63 +25391,72 @@ func AddV2(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
return op.Output(0)
}
-// Saves the input tensors to disk.
-//
-// The size of `tensor_names` must match the number of tensors in `data`. `data[i]`
-// is written to `filename` with name `tensor_names[i]`.
-//
-// See also `SaveSlices`.
+// Elementwise computes the bitwise right-shift of `x` and `y`.
//
-// Arguments:
-// filename: Must have a single element. The name of the file to which we write
-// the tensor.
-// tensor_names: Shape `[N]`. The names of the tensors to be saved.
-// data: `N` tensors to save.
+// Performs a logical shift for unsigned integer types, and an arithmetic shift
+// for signed integer types.
//
-// Returns the created operation.
-func Save(scope *Scope, filename tf.Output, tensor_names tf.Output, data []tf.Output) (o *tf.Operation) {
+// If `y` is negative, or greater than or equal to the width of `x` in bits,
+// the result is implementation defined.
+func RightShift(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Save",
+ Type: "RightShift",
Input: []tf.Input{
- filename, tensor_names, tf.OutputList(data),
+ x, y,
},
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
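+
+// Combined sketch for the three bitwise wrappers above (invented values; same
+// imports as the SpaceToDepth sketch):
+//
+// ```
+// s := op.NewScope()
+// x := op.Const(s, []int32{12}) // binary 1100
+// y := op.Const(s, []int32{10}) // binary 1010
+// and := op.BitwiseAnd(s, x, y)                       // 8  (binary 1000)
+// shl := op.LeftShift(s, x, op.Const(s, []int32{1}))  // 24 (binary 11000)
+// shr := op.RightShift(s, x, op.Const(s, []int32{2})) // 3  (binary 11)
+// ```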
-// BiasAddAttr is an optional argument to BiasAdd.
-type BiasAddAttr func(optionalAttr)
+// DecodeWavAttr is an optional argument to DecodeWav.
+type DecodeWavAttr func(optionalAttr)
-// BiasAddDataFormat sets the optional data_format attribute to value.
+// DecodeWavDesiredChannels sets the optional desired_channels attribute to value.
//
-// value: Specify the data format of the input and output data. With the
-// default format "NHWC", the bias tensor will be added to the last dimension
-// of the value tensor.
-// Alternatively, the format could be "NCHW", the data storage order of:
-// [batch, in_channels, in_height, in_width].
-// The tensor will be added to "in_channels", the third-to-the-last
-// dimension.
-// If not specified, defaults to "NHWC"
-func BiasAddDataFormat(value string) BiasAddAttr {
+// value: Number of sample channels wanted.
+// If not specified, defaults to -1
+func DecodeWavDesiredChannels(value int64) DecodeWavAttr {
return func(m optionalAttr) {
- m["data_format"] = value
+ m["desired_channels"] = value
}
}
-// Adds `bias` to `value`.
+// DecodeWavDesiredSamples sets the optional desired_samples attribute to value.
//
-// This is a special case of `tf.add` where `bias` is restricted to be 1-D.
-// Broadcasting is supported, so `value` may have any number of dimensions.
+// value: Length of audio requested.
+// If not specified, defaults to -1
+func DecodeWavDesiredSamples(value int64) DecodeWavAttr {
+ return func(m optionalAttr) {
+ m["desired_samples"] = value
+ }
+}
+
+// Decode a 16-bit PCM WAV file to a float tensor.
+//
+// The -32768 to 32767 signed 16-bit values will be scaled to -1.0 to 1.0 in float.
+//
+// When desired_channels is set and the input contains fewer channels than
+// requested, the last channel will be duplicated to give the requested number;
+// if the input has more channels than requested, the additional channels will
+// be ignored.
+//
+// If desired_samples is set, then the audio will be cropped or padded with zeroes
+// to the requested length.
+//
+// The first output contains a Tensor with the content of the audio samples. The
+// innermost dimension will be the number of channels, and the outer dimension
+// the number of samples. For example, a ten-sample-long stereo WAV file should
+// give an output shape of [10, 2].
//
// Arguments:
-// value: Any number of dimensions.
-// bias: 1-D with size the last dimension of `value`.
+// contents: The WAV-encoded audio, usually from a file.
//
-// Returns Broadcasted sum of `value` and `bias`.
-func BiasAdd(scope *Scope, value tf.Output, bias tf.Output, optional ...BiasAddAttr) (output tf.Output) {
+// Returns:
+// audio: 2-D with shape `[length, channels]`.
+// sample_rate: Scalar holding the sample rate found in the WAV header.
+func DecodeWav(scope *Scope, contents tf.Output, optional ...DecodeWavAttr) (audio tf.Output, sample_rate tf.Output) {
if scope.Err() != nil {
return
}
@@ -25879,51 +25465,50 @@ func BiasAdd(scope *Scope, value tf.Output, bias tf.Output, optional ...BiasAddA
a(attrs)
}
opspec := tf.OpSpec{
- Type: "BiasAdd",
+ Type: "DecodeWav",
Input: []tf.Input{
- value, bias,
+ contents,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1)
}
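+
+// Sketch of decoding a WAV file in a graph; `speech.wav` is a made-up path,
+// and ReadFile is another generated wrapper in this package:
+//
+// ```
+// s := op.NewScope()
+// contents := op.ReadFile(s, op.Const(s, "speech.wav"))
+// // Force mono output regardless of the file's channel count.
+// audio, sampleRate := op.DecodeWav(s, contents, op.DecodeWavDesiredChannels(1))
+// ```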
-// SparseReduceSumSparseAttr is an optional argument to SparseReduceSumSparse.
-type SparseReduceSumSparseAttr func(optionalAttr)
+// UniqueAttr is an optional argument to Unique.
+type UniqueAttr func(optionalAttr)
-// SparseReduceSumSparseKeepDims sets the optional keep_dims attribute to value.
-//
-// value: If true, retain reduced dimensions with length 1.
-// If not specified, defaults to false
-func SparseReduceSumSparseKeepDims(value bool) SparseReduceSumSparseAttr {
+// UniqueOutIdx sets the optional out_idx attribute to value.
+// If not specified, defaults to DT_INT32
+func UniqueOutIdx(value tf.DataType) UniqueAttr {
return func(m optionalAttr) {
- m["keep_dims"] = value
+ m["out_idx"] = value
}
}
-// Computes the sum of elements across dimensions of a SparseTensor.
+// Finds unique elements in a 1-D tensor.
//
-// This Op takes a SparseTensor and is the sparse counterpart to
-// `tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a
-// SparseTensor.
+// This operation returns a tensor `y` containing all of the unique elements of `x`
+// sorted in the same order that they occur in `x`. This operation also returns a
+// tensor `idx` the same size as `x` that contains the index of each value of `x`
+// in the unique output `y`. In other words:
//
-// Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
-// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
-// `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
-// with length 1.
+// `y[idx[i]] = x[i] for i in [0, 1, ..., len(x) - 1]`
//
-// If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
-// with a single element is returned. Additionally, the axes can be negative,
-// which are interpreted according to the indexing rules in Python.
+// For example:
+//
+// ```
+// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
+// y, idx = unique(x)
+// y ==> [1, 2, 4, 7, 8]
+// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
+// ```
//
// Arguments:
-// input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a
-// SparseTensor, possibly not in canonical ordering.
-// input_values: 1-D. `N` non-empty values corresponding to `input_indices`.
-// input_shape: 1-D. Shape of the input SparseTensor.
-// reduction_axes: 1-D. Length-`K` vector containing the reduction axes.
-func SparseReduceSumSparse(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output, reduction_axes tf.Output, optional ...SparseReduceSumSparseAttr) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
+// x: 1-D.
+//
+// Returns:
+// y: 1-D. The unique elements of `x`.
+// idx: 1-D. The index of each value of `x` in the output `y`.
+func Unique(scope *Scope, x tf.Output, optional ...UniqueAttr) (y tf.Output, idx tf.Output) {
if scope.Err() != nil {
return
}
@@ -25932,343 +25517,410 @@ func SparseReduceSumSparse(scope *Scope, input_indices tf.Output, input_values t
a(attrs)
}
opspec := tf.OpSpec{
- Type: "SparseReduceSumSparse",
+ Type: "Unique",
Input: []tf.Input{
- input_indices, input_values, input_shape, reduction_axes,
+ x,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
+ return op.Output(0), op.Output(1)
}
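+
+// Sketch mirroring the documentation example above (invented identifiers;
+// same imports as the SpaceToDepth sketch):
+//
+// ```
+// s := op.NewScope()
+// x := op.Const(s, []int32{1, 1, 2, 4, 4, 4, 7, 8, 8})
+// y, idx := op.Unique(s, x)
+// // After running in a session: y => [1 2 4 7 8], idx => [0 0 1 2 2 2 3 4 4].
+// ```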
-// Returns x * y element-wise.
+// Concatenates a list of `N` tensors along the first dimension.
//
-// *NOTE*: `Multiply` supports broadcasting. More about broadcasting
-// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-func Mul(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
+// The input tensors are all required to have size 1 in the first dimension.
+//
+// For example:
+//
+// ```
+// # 'x' is [[1, 4]]
+// # 'y' is [[2, 5]]
+// # 'z' is [[3, 6]]
+// parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim.
+// ```
+//
+// The difference between concat and parallel_concat is that concat requires all
+// of the inputs to be computed before the operation will begin, but doesn't
+// require that the input shapes be known during graph construction. Parallel
+// concat will copy pieces of the input into the output as they become
+// available; in some situations this can provide a performance benefit.
+//
+// Arguments:
+// values: Tensors to be concatenated. All must have size 1 in the first dimension
+// and the same shape.
+// shape: The final shape of the result; should be equal to the shape of any
+// input, but with the number of input values in the first dimension.
+//
+// Returns The concatenated tensor.
+func ParallelConcat(scope *Scope, values []tf.Output, shape tf.Shape) (output tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"shape": shape}
opspec := tf.OpSpec{
- Type: "Mul",
+ Type: "ParallelConcat",
Input: []tf.Input{
- x, y,
+ tf.OutputList(values),
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
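+
+// Sketch showing the statically known result shape that ParallelConcat
+// requires (invented values; tf.MakeShape builds the shape attr):
+//
+// ```
+// s := op.NewScope()
+// x := op.Const(s, [][]int32{{1, 4}})
+// y := op.Const(s, [][]int32{{2, 5}})
+// z := op.Const(s, [][]int32{{3, 6}})
+// out := op.ParallelConcat(s, []tf.Output{x, y, z}, tf.MakeShape(3, 2))
+// ```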
-// Returns x / y element-wise.
+// Concatenates tensors along one dimension.
//
-// *NOTE*: `Div` supports broadcasting. More about broadcasting
-// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-func Div(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
+// Arguments:
+// concat_dim: 0-D. The dimension along which to concatenate. Must be in the
+// range [0, rank(values)).
+// values: The `N` Tensors to concatenate. Their ranks and types must match,
+// and their sizes must match in all dimensions except `concat_dim`.
+//
+// Returns A `Tensor` with the concatenation of values stacked along the
+// `concat_dim` dimension. This tensor's shape matches that of `values` except
+// in `concat_dim` where it has the sum of the sizes.
+func Concat(scope *Scope, concat_dim tf.Output, values []tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Div",
+ Type: "Concat",
Input: []tf.Input{
- x, y,
+ concat_dim, tf.OutputList(values),
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// ApproximateEqualAttr is an optional argument to ApproximateEqual.
-type ApproximateEqualAttr func(optionalAttr)
-
-// ApproximateEqualTolerance sets the optional tolerance attribute to value.
-// If not specified, defaults to 1e-05
-func ApproximateEqualTolerance(value float32) ApproximateEqualAttr {
- return func(m optionalAttr) {
- m["tolerance"] = value
- }
-}
-
-// Returns the truth value of abs(x-y) < tolerance element-wise.
-func ApproximateEqual(scope *Scope, x tf.Output, y tf.Output, optional ...ApproximateEqualAttr) (z tf.Output) {
+// Writes a `Summary` protocol buffer with a histogram.
+//
+// The generated
+// [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
+// has one summary value containing a histogram for `values`.
+//
+// This op reports an `InvalidArgument` error if any value is not finite.
+//
+// Arguments:
+// writer: A handle to a summary writer.
+// step: The step to write the summary for.
+// tag: Scalar. Tag to use for the `Summary.Value`.
+// values: Any shape. Values to use to build the histogram.
+//
+// Returns the created operation.
+func WriteHistogramSummary(scope *Scope, writer tf.Output, step tf.Output, tag tf.Output, values tf.Output) (o *tf.Operation) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "ApproximateEqual",
+ Type: "WriteHistogramSummary",
Input: []tf.Input{
- x, y,
+ writer, step, tag, values,
},
- Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
-// Returns the max of x and y (i.e. x > y ? x : y) element-wise.
+// Compute the lower regularized incomplete Gamma function `P(a, x)`.
//
-// *NOTE*: `Maximum` supports broadcasting. More about broadcasting
-// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-func Maximum(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
+// The lower regularized incomplete Gamma function is defined as:
+//
+//
+// \\(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\\)
+//
+// where
+//
+// \\(gamma(a, x) = \int_{0}^{x} t^{a-1} \exp(-t) dt\\)
+//
+// is the lower incomplete Gamma function.
+//
+// Note that `Q(a, x)` (`Igammac`) above is the upper regularized incomplete
+// Gamma function.
+func Igamma(scope *Scope, a tf.Output, x tf.Output) (z tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Maximum",
+ Type: "Igamma",
Input: []tf.Input{
- x, y,
+ a, x,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// LogUniformCandidateSamplerAttr is an optional argument to LogUniformCandidateSampler.
-type LogUniformCandidateSamplerAttr func(optionalAttr)
-
-// LogUniformCandidateSamplerSeed sets the optional seed attribute to value.
-//
-// value: If either seed or seed2 are set to be non-zero, the random number
-// generator is seeded by the given seed. Otherwise, it is seeded by a
-// random seed.
-// If not specified, defaults to 0
-func LogUniformCandidateSamplerSeed(value int64) LogUniformCandidateSamplerAttr {
- return func(m optionalAttr) {
- m["seed"] = value
- }
-}
-
-// LogUniformCandidateSamplerSeed2 sets the optional seed2 attribute to value.
-//
-// value: An second seed to avoid seed collision.
-// If not specified, defaults to 0
-func LogUniformCandidateSamplerSeed2(value int64) LogUniformCandidateSamplerAttr {
- return func(m optionalAttr) {
- m["seed2"] = value
- }
-}
-
-// Generates labels for candidate sampling with a log-uniform distribution.
+// Computes offsets of concat inputs within its output.
//
-// See explanations of candidate sampling and the data formats at
-// go/candidate-sampling.
+// For example:
//
-// For each batch, this op picks a single set of sampled candidate labels.
+// ```
+// # 'x' is [2, 2, 7]
+// # 'y' is [2, 3, 7]
+// # 'z' is [2, 5, 7]
+// concat_offset(2, [x, y, z]) => [0, 0, 0], [0, 2, 0], [0, 5, 0]
+// ```
//
-// The advantages of sampling candidates per-batch are simplicity and the
-// possibility of efficient dense matrix multiplication. The disadvantage is that
-// the sampled candidates must be chosen independently of the context and of the
-// true labels.
+// This is typically used by gradient computations for a concat operation.
//
// Arguments:
-// true_classes: A batch_size * num_true matrix, in which each row contains the
-// IDs of the num_true target_classes in the corresponding original label.
-// num_true: Number of true labels per context.
-// num_sampled: Number of candidates to randomly sample.
-// unique: If unique is true, we sample with rejection, so that all sampled
-// candidates in a batch are unique. This requires some approximation to
-// estimate the post-rejection sampling probabilities.
-// range_max: The sampler will sample integers from the interval [0, range_max).
+// concat_dim: The dimension along which to concatenate.
+// shape: The `N` int32 vectors representing the shapes of the tensors being concatenated.
//
-// Returns A vector of length num_sampled, in which each element is
-// the ID of a sampled candidate.A batch_size * num_true matrix, representing
-// the number of times each candidate is expected to occur in a batch
-// of sampled candidates. If unique=true, then this is a probability.A vector of length num_sampled, for each sampled
-// candidate representing the number of times the candidate is expected
-// to occur in a batch of sampled candidates. If unique=true, then this is a
-// probability.
-func LogUniformCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...LogUniformCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
+// Returns The `N` int32 vectors representing the starting offset
+// of input tensors within the concatenated output.
+func ConcatOffset(scope *Scope, concat_dim tf.Output, shape []tf.Output) (offset []tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique, "range_max": range_max}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "LogUniformCandidateSampler",
+ Type: "ConcatOffset",
Input: []tf.Input{
- true_classes,
+ concat_dim, tf.OutputList(shape),
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
+ if scope.Err() != nil {
+ return
+ }
+ var idx int
+ var err error
+ if offset, idx, err = makeOutputList(op, idx, "offset"); err != nil {
+ scope.UpdateErr("ConcatOffset", err)
+ return
+ }
+ return offset
}
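+
+// Sketch of the list-valued output, mirroring the example above (invented
+// identifiers; note the wrapper returns a []tf.Output built via
+// makeOutputList):
+//
+// ```
+// s := op.NewScope()
+// dim := op.Const(s, int32(1))
+// shapes := []tf.Output{
+//     op.Const(s, []int32{2, 2, 7}),
+//     op.Const(s, []int32{2, 3, 7}),
+//     op.Const(s, []int32{2, 5, 7}),
+// }
+// offsets := op.ConcatOffset(s, dim, shapes) // [0 0 0], [0 2 0], [0 5 0]
+// ```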
-// Returns the truth value of (x < y) element-wise.
+// Splits a tensor into `num_split` tensors along one dimension.
//
-// *NOTE*: `Less` supports broadcasting. More about broadcasting
-// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-func Less(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
+// Arguments:
+// axis: 0-D. The dimension along which to split. Must be in the range
+// `[-rank(value), rank(value))`.
+// value: The tensor to split.
+// num_split: The number of ways to split. Must evenly divide
+// `value.shape[axis]`.
+//
+// Returns `num_split` identically shaped tensors, whose shape matches that of
+// `value` except along `axis`, where their sizes are
+// `value.shape[axis] / num_split`.
+func Split(scope *Scope, axis tf.Output, value tf.Output, num_split int64) (output []tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"num_split": num_split}
opspec := tf.OpSpec{
- Type: "Less",
+ Type: "Split",
Input: []tf.Input{
- x, y,
+ axis, value,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// FakeQuantWithMinMaxVarsGradientAttr is an optional argument to FakeQuantWithMinMaxVarsGradient.
-type FakeQuantWithMinMaxVarsGradientAttr func(optionalAttr)
-
-// FakeQuantWithMinMaxVarsGradientNumBits sets the optional num_bits attribute to value.
-//
-// value: The bitwidth of the quantization; between 2 and 8, inclusive.
-// If not specified, defaults to 8
-func FakeQuantWithMinMaxVarsGradientNumBits(value int64) FakeQuantWithMinMaxVarsGradientAttr {
- return func(m optionalAttr) {
- m["num_bits"] = value
+ if scope.Err() != nil {
+ return
}
-}
-
-// FakeQuantWithMinMaxVarsGradientNarrowRange sets the optional narrow_range attribute to value.
-//
-// value: Whether to quantize into 2^num_bits - 1 distinct values.
-// If not specified, defaults to false
-func FakeQuantWithMinMaxVarsGradientNarrowRange(value bool) FakeQuantWithMinMaxVarsGradientAttr {
- return func(m optionalAttr) {
- m["narrow_range"] = value
+ var idx int
+ var err error
+ if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
+ scope.UpdateErr("Split", err)
+ return
}
+ return output
}
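+
+// Sketch splitting a vector into three equal parts (invented values; same
+// imports as the SpaceToDepth sketch):
+//
+// ```
+// s := op.NewScope()
+// axis := op.Const(s, int32(0))
+// value := op.Const(s, []float32{1, 2, 3, 4, 5, 6})
+// parts := op.Split(s, axis, value, 3) // three tensors, each of shape [2]
+// ```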
-// Compute gradients for a FakeQuantWithMinMaxVars operation.
+// Splits a tensor into `num_split` tensors along one dimension.
//
// Arguments:
-// gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation.
-// inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation.
-// min, max: Quantization interval, scalar floats.
-//
+// value: The tensor to split.
+// size_splits: List containing the sizes of each output tensor along the split
+// dimension. Must sum to the dimension of `value` along `axis`.
+// Can contain one -1 indicating that the dimension is to be inferred.
+// axis: 0-D. The dimension along which to split. Must be in the range
+// `[-rank(value), rank(value))`.
//
//
-// Returns Backpropagated gradients w.r.t. inputs:
-// `gradients * (inputs >= min && inputs <= max)`.Backpropagated gradients w.r.t. min parameter:
-// `sum(gradients * (inputs < min))`.Backpropagated gradients w.r.t. max parameter:
-// `sum(gradients * (inputs > max))`.
-func FakeQuantWithMinMaxVarsGradient(scope *Scope, gradients tf.Output, inputs tf.Output, min tf.Output, max tf.Output, optional ...FakeQuantWithMinMaxVarsGradientAttr) (backprops_wrt_input tf.Output, backprop_wrt_min tf.Output, backprop_wrt_max tf.Output) {
+// Returns Tensors whose shape matches that of `value`
+// except along `axis`, where their sizes are
+// `size_splits[i]`.
+func SplitV(scope *Scope, value tf.Output, size_splits tf.Output, axis tf.Output, num_split int64) (output []tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
+ attrs := map[string]interface{}{"num_split": num_split}
opspec := tf.OpSpec{
- Type: "FakeQuantWithMinMaxVarsGradient",
+ Type: "SplitV",
Input: []tf.Input{
- gradients, inputs, min, max,
+ value, size_splits, axis,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
+ if scope.Err() != nil {
+ return
+ }
+ var idx int
+ var err error
+ if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
+ scope.UpdateErr("SplitV", err)
+ return
+ }
+ return output
}
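+
+// Sketch of variable-sized splits, with one size inferred (invented values):
+//
+// ```
+// s := op.NewScope()
+// value := op.Const(s, []float32{1, 2, 3, 4, 5, 6})
+// sizes := op.Const(s, []int64{2, -1}) // the -1 is inferred as 4
+// axis := op.Const(s, int32(0))
+// parts := op.SplitV(s, value, sizes, axis, 2) // shapes [2] and [4]
+// ```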
-// MaxPoolGradV2Attr is an optional argument to MaxPoolGradV2.
-type MaxPoolGradV2Attr func(optionalAttr)
-
-// MaxPoolGradV2DataFormat sets the optional data_format attribute to value.
+// Gives a guarantee to the TF runtime that the input tensor is a constant.
//
-// value: Specify the data format of the input and output data. With the
-// default format "NHWC", the data is stored in the order of:
-// [batch, in_height, in_width, in_channels].
-// Alternatively, the format could be "NCHW", the data storage order of:
-// [batch, in_channels, in_height, in_width].
-// If not specified, defaults to "NHWC"
-func MaxPoolGradV2DataFormat(value string) MaxPoolGradV2Attr {
- return func(m optionalAttr) {
- m["data_format"] = value
+// The runtime is then free to make optimizations based on this.
+//
+// Only accepts value-typed tensors as inputs and rejects resource variable
+// handles as input.
+//
+// Returns the input tensor without modification.
+func GuaranteeConst(scope *Scope, input tf.Output) (output tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "GuaranteeConst",
+ Input: []tf.Input{
+ input,
+ },
}
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// Computes gradients of the maxpooling function.
+// Returns a tensor of zeros with the same shape and type as x.
//
// Arguments:
-// orig_input: The original input tensor.
-// orig_output: The original output tensor.
-// grad: 4-D. Gradients w.r.t. the output of `max_pool`.
-// ksize: The size of the window for each dimension of the input tensor.
-// strides: The stride of the sliding window for each dimension of the
-// input tensor.
-// padding: The type of padding algorithm to use.
+// x: a tensor of type T.
//
-// Returns Gradients w.r.t. the input to `max_pool`.
-func MaxPoolGradV2(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize tf.Output, strides tf.Output, padding string, optional ...MaxPoolGradV2Attr) (output tf.Output) {
+// Returns a tensor of the same shape and type as x but filled with zeros.
+func ZerosLike(scope *Scope, x tf.Output) (y tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"padding": padding}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "MaxPoolGradV2",
+ Type: "ZerosLike",
Input: []tf.Input{
- orig_input, orig_output, grad, ksize, strides,
+ x,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Returns the min of x and y (i.e. x < y ? x : y) element-wise.
+// Flips all bits elementwise.
//
-// *NOTE*: `Minimum` supports broadcasting. More about broadcasting
-// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-func Minimum(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
+// The result will have exactly those bits set that are not set in `x`. The
+// computation is performed on the underlying representation of x.
+func Invert(scope *Scope, x tf.Output) (y tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Minimum",
+ Type: "Invert",
Input: []tf.Input{
- x, y,
+ x,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// BiasAddGradAttr is an optional argument to BiasAddGrad.
-type BiasAddGradAttr func(optionalAttr)
+// DequantizeAttr is an optional argument to Dequantize.
+type DequantizeAttr func(optionalAttr)
-// BiasAddGradDataFormat sets the optional data_format attribute to value.
-//
-// value: Specify the data format of the input and output data. With the
-// default format "NHWC", the bias tensor will be added to the last dimension
-// of the value tensor.
-// Alternatively, the format could be "NCHW", the data storage order of:
-// [batch, in_channels, in_height, in_width].
-// The tensor will be added to "in_channels", the third-to-the-last
-// dimension.
-// If not specified, defaults to "NHWC"
-func BiasAddGradDataFormat(value string) BiasAddGradAttr {
+// DequantizeMode sets the optional mode attribute to value.
+// If not specified, defaults to "MIN_COMBINED"
+func DequantizeMode(value string) DequantizeAttr {
return func(m optionalAttr) {
- m["data_format"] = value
+ m["mode"] = value
}
}
-// The backward operation for "BiasAdd" on the "bias" tensor.
+// Dequantize the 'input' tensor into a float Tensor.
//
-// It accumulates all the values from out_backprop into the feature dimension.
-// For NHWC data format, the feature dimension is the last. For NCHW data format,
-// the feature dimension is the third-to-last.
+// [min_range, max_range] are scalar floats that specify the range for
+// the 'input' data. The 'mode' attribute controls exactly which calculations are
+// used to convert the float values to their quantized equivalents.
+//
+// In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
+//
+// ```
+// if T == qint8: in[i] += (range(T) + 1) / 2.0
+// out[i] = min_range + (in[i]* (max_range - min_range) / range(T))
+// ```
+// here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`
+//
+// *MIN_COMBINED Mode Example*
+//
+// If the input comes from a QuantizedRelu6, the output type is
+// quint8 (range of 0-255) but the possible range of QuantizedRelu6 is
+// 0-6. The min_range and max_range values are therefore 0.0 and 6.0.
+// Dequantize on quint8 will take each value, cast it to float, and multiply
+// it by 6 / 255.
+// Note that if the quantized type is qint8, the operation will additionally
+// add 128 to each value prior to casting.
+//
+// If the mode is 'MIN_FIRST', then this approach is used:
+//
+// ```c++
+// num_discrete_values = 1 << (# of bits in T)
+// range_adjust = num_discrete_values / (num_discrete_values - 1)
+// range = (range_max - range_min) * range_adjust
+// range_scale = range / num_discrete_values
+// // where lowest_quantized = numeric_limits<T>::min()
+// const double offset_input = static_cast<double>(input) - lowest_quantized;
+// result = range_min + (offset_input * range_scale)
+// ```
+//
+// *SCALED mode Example*
+//
+// `SCALED` mode matches the quantization approach used in
+// `QuantizeAndDequantize{V2|V3}`.
+//
+// If the mode is `SCALED`, we do not use the full range of the output type,
+// choosing to elide the lowest possible value for symmetry (e.g., output range is
+// -127 to 127, not -128 to 127 for signed 8 bit quantization), so that 0.0 maps to
+// 0.
+//
+// We first find the range of values in our tensor. The
+// range we use is always centered on 0, so we find m such that
+// ```c++
+// m = max(abs(input_min), abs(input_max))
+// ```
+//
+// Our input tensor range is then `[-m, m]`.
+//
+// Next, we choose our fixed-point quantization buckets, `[min_fixed, max_fixed]`.
+// If T is signed, this is
+// ```
+// num_bits = sizeof(T) * 8
+// [min_fixed, max_fixed] =
+// [-((1 << (num_bits - 1)) - 1), (1 << (num_bits - 1)) - 1]
+// ```
+//
+// Otherwise, if T is unsigned, the fixed-point range is
+// ```
+// [min_fixed, max_fixed] = [0, (1 << num_bits) - 1]
+// ```
+//
+// From this we compute our scaling factor, s:
+// ```c++
+// s = (2 * m) / (max_fixed - min_fixed)
+// ```
+//
+// Now we can dequantize the elements of our tensor:
+// ```c++
+// result = input * s
+// ```
//
// Arguments:
-// out_backprop: Any number of dimensions.
//
-// Returns 1-D with size the feature dimension of `out_backprop`.
-func BiasAddGrad(scope *Scope, out_backprop tf.Output, optional ...BiasAddGradAttr) (output tf.Output) {
+// min_range: The minimum scalar value possibly produced for the input.
+// max_range: The maximum scalar value possibly produced for the input.
+func Dequantize(scope *Scope, input tf.Output, min_range tf.Output, max_range tf.Output, optional ...DequantizeAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
@@ -26277,9 +25929,9 @@ func BiasAddGrad(scope *Scope, out_backprop tf.Output, optional ...BiasAddGradAt
a(attrs)
}
opspec := tf.OpSpec{
- Type: "BiasAddGrad",
+ Type: "Dequantize",
Input: []tf.Input{
- out_backprop,
+ input, min_range, max_range,
},
Attrs: attrs,
}
@@ -26287,227 +25939,383 @@ func BiasAddGrad(scope *Scope, out_backprop tf.Output, optional ...BiasAddGradAt
return op.Output(0)
}
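+
+// Caller-side sketch matching the MIN_COMBINED example above; the quint8
+// placeholder stands in for quantized data produced elsewhere in the graph
+// (invented identifiers; same imports as the SpaceToDepth sketch):
+//
+// ```
+// s := op.NewScope()
+// quantized := op.Placeholder(s, tf.Quint8)
+// minRange := op.Const(s, float32(0.0))
+// maxRange := op.Const(s, float32(6.0))
+// floats := op.Dequantize(s, quantized, minRange, maxRange,
+//     op.DequantizeMode("MIN_COMBINED"))
+// ```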
-// Compute the upper regularized incomplete Gamma function `Q(a, x)`.
-//
-// The upper regularized incomplete Gamma function is defined as:
-//
-// \\(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\\)
-//
-// where
+// Returns the element-wise max of two SparseTensors.
//
-// \\(Gamma(a, x) = int_{x}^{\infty} t^{a-1} exp(-t) dt\\)
+// Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
//
-// is the upper incomplete Gama function.
+// Arguments:
+// a_indices: 2-D. `N x R` matrix with the indices of non-empty values in a
+// SparseTensor, in the canonical lexicographic ordering.
+// a_values: 1-D. `N` non-empty values corresponding to `a_indices`.
+// a_shape: 1-D. Shape of the input SparseTensor.
+// b_indices: counterpart to `a_indices` for the other operand.
+// b_values: counterpart to `a_values` for the other operand; must be of the same dtype.
+// b_shape: counterpart to `a_shape` for the other operand; the two shapes must be equal.
//
-// Note, above `P(a, x)` (`Igamma`) is the lower regularized complete
-// Gamma function.
-func Igammac(scope *Scope, a tf.Output, x tf.Output) (z tf.Output) {
+// Returns 2-D. The indices of the output SparseTensor.1-D. The values of the output SparseTensor.
+func SparseSparseMaximum(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b_indices tf.Output, b_values tf.Output, b_shape tf.Output) (output_indices tf.Output, output_values tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Igammac",
+ Type: "SparseSparseMaximum",
Input: []tf.Input{
- a, x,
+ a_indices, a_values, a_shape, b_indices, b_values, b_shape,
},
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1)
}
-// Compute the lower regularized incomplete Gamma function `Q(a, x)`.
-//
-// The lower regularized incomplete Gamma function is defined as:
+// Returns a batched matrix tensor with new batched diagonal values.
//
+// Given `input` and `diagonal`, this operation returns a tensor with the
+// same shape and values as `input`, except for the main diagonal of the
+// innermost matrices. These will be overwritten by the values in `diagonal`.
//
-// \\(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\\)
+// The output is computed as follows:
//
-// where
+// Assume `input` has `k+1` dimensions `[I, J, K, ..., M, N]` and `diagonal` has
+// `k` dimensions `[I, J, K, ..., min(M, N)]`. Then the output is a
+// tensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where:
//
-// \\(gamma(a, x) = int_{0}^{x} t^{a-1} exp(-t) dt\\)
+// * `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`.
+// * `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`.
//
-// is the lower incomplete Gamma function.
+// Arguments:
+// input: Rank `k+1`, where `k >= 1`.
+// diagonal: Rank `k`, where `k >= 1`.
//
-// Note, above `Q(a, x)` (`Igammac`) is the upper regularized complete
-// Gamma function.
-func Igamma(scope *Scope, a tf.Output, x tf.Output) (z tf.Output) {
+// Returns Rank `k+1`, with `output.shape = input.shape`.
+func MatrixSetDiag(scope *Scope, input tf.Output, diagonal tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Igamma",
+ Type: "MatrixSetDiag",
Input: []tf.Input{
- a, x,
+ input, diagonal,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
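+
+// Minimal sketch (invented values): overwrite the main diagonal of a matrix.
+//
+// ```
+// s := op.NewScope()
+// input := op.Const(s, [][]float32{{1, 2}, {3, 4}})
+// diag := op.Const(s, []float32{9, 8})
+// out := op.MatrixSetDiag(s, input, diag) // [[9 2] [3 8]]
+// ```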
-// Computes arctangent of `y/x` element-wise, respecting signs of the arguments.
+// EditDistanceAttr is an optional argument to EditDistance.
+type EditDistanceAttr func(optionalAttr)
+
+// EditDistanceNormalize sets the optional normalize attribute to value.
//
-// This is the angle \( \theta \in [-\pi, \pi] \) such that
-// \[ x = r \cos(\theta) \]
+// value: boolean (if true, edit distances are normalized by length of truth).
+// If not specified, defaults to true
+func EditDistanceNormalize(value bool) EditDistanceAttr {
+ return func(m optionalAttr) {
+ m["normalize"] = value
+ }
+}
+
+// Computes the (possibly normalized) Levenshtein Edit Distance.
+//
+// The inputs are variable-length sequences provided by SparseTensors
+// (hypothesis_indices, hypothesis_values, hypothesis_shape)
// and
-// \[ y = r \sin(\theta) \]
-// where \(r = \sqrt(x^2 + y^2) \).
-func Atan2(scope *Scope, y tf.Output, x tf.Output) (z tf.Output) {
+// (truth_indices, truth_values, truth_shape).
+//
+// Arguments:
+// hypothesis_indices: The indices of the hypothesis list SparseTensor.
+// This is an N x R int64 matrix.
+// hypothesis_values: The values of the hypothesis list SparseTensor.
+// This is an N-length vector.
+// hypothesis_shape: The shape of the hypothesis list SparseTensor.
+// This is an R-length vector.
+// truth_indices: The indices of the truth list SparseTensor.
+// This is an M x R int64 matrix.
+// truth_values: The values of the truth list SparseTensor.
+// This is an M-length vector.
+// truth_shape: The shape of the truth list SparseTensor.
+// This is an R-length vector.
+//
+// Returns A dense float tensor with rank R - 1.
+//
+// For the example input:
+//
+// // hypothesis represents a 2x1 matrix with variable-length values:
+// // (0,0) = ["a"]
+// // (1,0) = ["b"]
+// hypothesis_indices = [[0, 0, 0],
+// [1, 0, 0]]
+// hypothesis_values = ["a", "b"]
+// hypothesis_shape = [2, 1, 1]
+//
+// // truth represents a 2x2 matrix with variable-length values:
+// // (0,0) = []
+// // (0,1) = ["a"]
+// // (1,0) = ["b", "c"]
+// // (1,1) = ["a"]
+// truth_indices = [[0, 1, 0],
+// [1, 0, 0],
+// [1, 0, 1],
+// [1, 1, 0]]
+// truth_values = ["a", "b", "c", "a"]
+// truth_shape = [2, 2, 2]
+// normalize = true
+//
+// The output will be:
+//
+// // output is a 2x2 matrix with edit distances normalized by truth lengths.
+// output = [[inf, 1.0], // (0,0): no truth, (0,1): no hypothesis
+// [0.5, 1.0]] // (1,0): addition, (1,1): no hypothesis
+func EditDistance(scope *Scope, hypothesis_indices tf.Output, hypothesis_values tf.Output, hypothesis_shape tf.Output, truth_indices tf.Output, truth_values tf.Output, truth_shape tf.Output, optional ...EditDistanceAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "Atan2",
+ Type: "EditDistance",
Input: []tf.Input{
- y, x,
+ hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
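A minimal end-to-end sketch of driving this wrapper from client code, reproducing the example above. It assumes the standard TensorFlow Go binding and generated op package import paths; the `+Inf` at (0,0) reflects normalization by an empty truth.

```go
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// Hypothesis: 2x1 matrix with values (0,0)=["a"], (1,0)=["b"].
	hypIndices := op.Const(s, [][]int64{{0, 0, 0}, {1, 0, 0}})
	hypValues := op.Const(s, []string{"a", "b"})
	hypShape := op.Const(s, []int64{2, 1, 1})
	// Truth: 2x2 matrix with values (0,1)=["a"], (1,0)=["b","c"], (1,1)=["a"].
	truthIndices := op.Const(s, [][]int64{{0, 1, 0}, {1, 0, 0}, {1, 0, 1}, {1, 1, 0}})
	truthValues := op.Const(s, []string{"a", "b", "c", "a"})
	truthShape := op.Const(s, []int64{2, 2, 2})
	dist := op.EditDistance(s, hypIndices, hypValues, hypShape,
		truthIndices, truthValues, truthShape, op.EditDistanceNormalize(true))

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()
	out, err := sess.Run(nil, []tf.Output{dist}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].Value()) // [[+Inf 1] [0.5 1]]
}
```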
-// Compute the regularized incomplete beta integral \\(I_x(a, b)\\).
+// Gather slices from `params` into a Tensor with shape specified by `indices`.
//
-// The regularized incomplete beta integral is defined as:
+// `indices` is a K-dimensional integer tensor, best thought of as a
+// (K-1)-dimensional tensor of indices into `params`, where each element defines a
+// slice of `params`:
+//
+// output[i_0, ..., i_{K-2}] = params[indices[i_0, ..., i_{K-2}]]
//
+// Whereas in @{tf.gather} `indices` defines slices into the first
+// dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the
+// first `N` dimensions of `params`, where `N = indices.shape[-1]`.
//
-// \\(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\\)
+// The last dimension of `indices` can be at most the rank of
+// `params`:
//
-// where
+// indices.shape[-1] <= params.rank
//
+// The last dimension of `indices` corresponds to elements
+// (if `indices.shape[-1] == params.rank`) or slices
+// (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]`
+// of `params`. The output tensor has shape
//
-// \\(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\\)
+// indices.shape[:-1] + params.shape[indices.shape[-1]:]
//
+// Some examples below.
//
-// is the incomplete beta function and \\(B(a, b)\\) is the *complete*
-// beta function.
-func Betainc(scope *Scope, a tf.Output, b tf.Output, x tf.Output) (z tf.Output) {
+// Simple indexing into a matrix:
+//
+// ```python
+// indices = [[0, 0], [1, 1]]
+// params = [['a', 'b'], ['c', 'd']]
+// output = ['a', 'd']
+// ```
+//
+// Slice indexing into a matrix:
+//
+// ```python
+// indices = [[1], [0]]
+// params = [['a', 'b'], ['c', 'd']]
+// output = [['c', 'd'], ['a', 'b']]
+// ```
+//
+// Indexing into a 3-tensor:
+//
+// ```python
+// indices = [[1]]
+// params = [[['a0', 'b0'], ['c0', 'd0']],
+// [['a1', 'b1'], ['c1', 'd1']]]
+// output = [[['a1', 'b1'], ['c1', 'd1']]]
+//
+//
+// indices = [[0, 1], [1, 0]]
+// params = [[['a0', 'b0'], ['c0', 'd0']],
+// [['a1', 'b1'], ['c1', 'd1']]]
+// output = [['c0', 'd0'], ['a1', 'b1']]
+//
+//
+// indices = [[0, 0, 1], [1, 0, 1]]
+// params = [[['a0', 'b0'], ['c0', 'd0']],
+// [['a1', 'b1'], ['c1', 'd1']]]
+// output = ['b0', 'b1']
+// ```
+//
+// Batched indexing into a matrix:
+//
+// ```python
+// indices = [[[0, 0]], [[0, 1]]]
+// params = [['a', 'b'], ['c', 'd']]
+// output = [['a'], ['b']]
+// ```
+//
+// Batched slice indexing into a matrix:
+//
+// ```python
+// indices = [[[1]], [[0]]]
+// params = [['a', 'b'], ['c', 'd']]
+// output = [[['c', 'd']], [['a', 'b']]]
+// ```
+//
+// Batched indexing into a 3-tensor:
+//
+// ```python
+// indices = [[[1]], [[0]]]
+// params = [[['a0', 'b0'], ['c0', 'd0']],
+// [['a1', 'b1'], ['c1', 'd1']]]
+// output = [[[['a1', 'b1'], ['c1', 'd1']]],
+// [[['a0', 'b0'], ['c0', 'd0']]]]
+//
+// indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
+// params = [[['a0', 'b0'], ['c0', 'd0']],
+// [['a1', 'b1'], ['c1', 'd1']]]
+// output = [[['c0', 'd0'], ['a1', 'b1']],
+// [['a0', 'b0'], ['c1', 'd1']]]
+//
+//
+// indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
+// params = [[['a0', 'b0'], ['c0', 'd0']],
+// [['a1', 'b1'], ['c1', 'd1']]]
+// output = [['b0', 'b1'], ['d0', 'c1']]
+// ```
+//
+// Arguments:
+// params: The tensor from which to gather values.
+// indices: Index tensor.
+//
+// Returns Values from `params` gathered from indices given by `indices`, with
+// shape `indices.shape[:-1] + params.shape[indices.shape[-1]:]`.
+func GatherNd(scope *Scope, params tf.Output, indices tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Betainc",
+ Type: "GatherNd",
Input: []tf.Input{
- a, b, x,
+ params, indices,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
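The first two examples above translate directly to graph construction with this wrapper. A short sketch, reusing the `tf`/`op` imports and session plumbing from the EditDistance example earlier (the helper name is illustrative):

```go
func gatherNdSketch(s *op.Scope) (elems, rows tf.Output) {
	params := op.Const(s, [][]string{{"a", "b"}, {"c", "d"}})
	// Element indexing: picks params[0][0] and params[1][1] -> ["a", "d"].
	elems = op.GatherNd(s, params, op.Const(s, [][]int32{{0, 0}, {1, 1}}))
	// Slice indexing: gathers row 1 then row 0 -> [["c", "d"], ["a", "b"]].
	rows = op.GatherNd(s, params, op.Const(s, [][]int32{{1}, {0}}))
	return elems, rows
}
```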
-// Returns the truth value of x OR y element-wise.
+// Eagerly executes a python function to compute func(input)->output.
//
-// *NOTE*: `LogicalOr` supports broadcasting. More about broadcasting
-// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-func LogicalOr(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
+// The semantics of the input, output, and attributes are the same as those for
+// PyFunc.
+func EagerPyFunc(scope *Scope, input []tf.Output, token string, Tout []tf.DataType) (output []tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"token": token, "Tout": Tout}
opspec := tf.OpSpec{
- Type: "LogicalOr",
+ Type: "EagerPyFunc",
Input: []tf.Input{
- x, y,
+ tf.OutputList(input),
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ if scope.Err() != nil {
+ return
+ }
+ var idx int
+ var err error
+ if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
+ scope.UpdateErr("EagerPyFunc", err)
+ return
+ }
+ return output
}
-// Selects elements from `x` or `y`, depending on `condition`.
-//
-// The `x`, and `y` tensors must all have the same shape, and the
-// output will also have that shape.
-//
-// The `condition` tensor must be a scalar if `x` and `y` are scalars.
-// If `x` and `y` are vectors or higher rank, then `condition` must be either a
-// scalar, a vector with size matching the first dimension of `x`, or must have
-// the same shape as `x`.
-//
-// The `condition` tensor acts as a mask that chooses, based on the value at each
-// element, whether the corresponding element / row in the output should be
-// taken from `x` (if true) or `y` (if false).
-//
-// If `condition` is a vector and `x` and `y` are higher rank matrices, then
-// it chooses which row (outer dimension) to copy from `x` and `y`.
-// If `condition` has the same shape as `x` and `y`, then it chooses which
-// element to copy from `x` and `y`.
-//
-// For example:
-//
-// ```python
-// # 'condition' tensor is [[True, False]
-// # [False, True]]
-// # 't' is [[1, 2],
-// # [3, 4]]
-// # 'e' is [[5, 6],
-// # [7, 8]]
-// select(condition, t, e) # => [[1, 6], [7, 4]]
-//
-//
-// # 'condition' tensor is [True, False]
-// # 't' is [[1, 2],
-// # [3, 4]]
-// # 'e' is [[5, 6],
-// # [7, 8]]
-// select(condition, t, e) ==> [[1, 2],
-// [7, 8]]
+// Stops gradient computation.
//
-// ```
+// When executed in a graph, this op outputs its input tensor as-is.
//
-// Arguments:
+// When building ops to compute gradients, this op prevents the contribution of
+// its inputs to be taken into account. Normally, the gradient generator adds ops
+// to a graph to compute the derivatives of a specified 'loss' by recursively
+// finding out inputs that contributed to its computation. If you insert this op
+// in the graph, its inputs are masked from the gradient generator. They are not
+// taken into account for computing gradients.
//
-// x: = A `Tensor` which may have the same shape as `condition`.
-// If `condition` is rank 1, `x` may have higher rank,
-// but its first dimension must match the size of `condition`.
-// y: = A `Tensor` with the same type and shape as `x`.
+// This is useful any time you want to compute a value with TensorFlow but need
+// to pretend that the value was a constant. Some examples include:
//
-// Returns = A `Tensor` with the same type and shape as `x` and `y`.
-func Select(scope *Scope, condition tf.Output, x tf.Output, y tf.Output) (output tf.Output) {
+// * The *EM* algorithm where the *M-step* should not involve backpropagation
+// through the output of the *E-step*.
+// * Contrastive divergence training of Boltzmann machines where, when
+// differentiating the energy function, the training must not backpropagate
+// through the graph that generated the samples from the model.
+// * Adversarial training, where no backprop should happen through the adversarial
+// example generation process.
+func StopGradient(scope *Scope, input tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Select",
+ Type: "StopGradient",
Input: []tf.Input{
- condition, x, y,
+ input,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
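A minimal sketch of wiring the op into a graph (same assumed imports as the earlier sketches; the helper name is illustrative):

```go
func stopGradientSketch(s *op.Scope) tf.Output {
	x := op.Const(s, []float32{1, 2, 3})
	// The forward value is x unchanged; during backprop, gradients
	// flowing through this node back to x are treated as zero.
	return op.StopGradient(s, x)
}
```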
-// MatMulAttr is an optional argument to MatMul.
-type MatMulAttr func(optionalAttr)
-
-// MatMulTransposeA sets the optional transpose_a attribute to value.
-//
-// value: If true, "a" is transposed before multiplication.
-// If not specified, defaults to false
-func MatMulTransposeA(value bool) MatMulAttr {
- return func(m optionalAttr) {
- m["transpose_a"] = value
+// Computes asin of x element-wise.
+func Asin(scope *Scope, x tf.Output) (y tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "Asin",
+ Input: []tf.Input{
+ x,
+ },
}
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
}
-// MatMulTransposeB sets the optional transpose_b attribute to value.
+// PreventGradientAttr is an optional argument to PreventGradient.
+type PreventGradientAttr func(optionalAttr)
+
+// PreventGradientMessage sets the optional message attribute to value.
//
-// value: If true, "b" is transposed before multiplication.
-// If not specified, defaults to false
-func MatMulTransposeB(value bool) MatMulAttr {
+// value: Will be printed in the error when anyone tries to differentiate
+// this operation.
+// If not specified, defaults to ""
+func PreventGradientMessage(value string) PreventGradientAttr {
return func(m optionalAttr) {
- m["transpose_b"] = value
+ m["message"] = value
}
}
-// Multiply the matrix "a" by the matrix "b".
+// An identity op that triggers an error if a gradient is requested.
//
-// The inputs must be two-dimensional matrices and the inner dimension of
-// "a" (after being transposed if transpose_a is true) must match the
-// outer dimension of "b" (after being transposed if transpose_b is
-// true).
+// When executed in a graph, this op outputs its input tensor as-is.
//
-// *Note*: The default kernel implementation for MatMul on GPUs uses
-// cublas.
-func MatMul(scope *Scope, a tf.Output, b tf.Output, optional ...MatMulAttr) (product tf.Output) {
+// When building ops to compute gradients, the TensorFlow gradient system
+// will return an error when trying to lookup the gradient of this op,
+// because no gradient must ever be registered for this function. This
+// op exists to prevent subtle bugs from silently returning unimplemented
+// gradients in some corner cases.
+//
+// Arguments:
+// input: any tensor.
+//
+// Returns the same input tensor.
+func PreventGradient(scope *Scope, input tf.Output, optional ...PreventGradientAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
@@ -26516,9 +26324,9 @@ func MatMul(scope *Scope, a tf.Output, b tf.Output, optional ...MatMulAttr) (pro
a(attrs)
}
opspec := tf.OpSpec{
- Type: "MatMul",
+ Type: "PreventGradient",
Input: []tf.Input{
- a, b,
+ input,
},
Attrs: attrs,
}
@@ -26526,44 +26334,23 @@ func MatMul(scope *Scope, a tf.Output, b tf.Output, optional ...MatMulAttr) (pro
return op.Output(0)
}
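A short sketch of the optional-attribute pattern these generated wrappers share: trailing functional options populate the attrs map (assumed imports as above; helper name illustrative):

```go
func preventGradientSketch(s *op.Scope) tf.Output {
	x := op.Const(s, []float32{1, 2, 3})
	// The forward pass is the identity; any attempt to differentiate
	// through this node fails with the supplied message.
	return op.PreventGradient(s, x,
		op.PreventGradientMessage("gradient intentionally unavailable"))
}
```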
-// MeanAttr is an optional argument to Mean.
-type MeanAttr func(optionalAttr)
-
-// MeanKeepDims sets the optional keep_dims attribute to value.
-//
-// value: If true, retain reduced dimensions with length 1.
-// If not specified, defaults to false
-func MeanKeepDims(value bool) MeanAttr {
- return func(m optionalAttr) {
- m["keep_dims"] = value
- }
-}
-
-// Computes the mean of elements across dimensions of a tensor.
+// Checks a tensor for NaN and Inf values.
//
-// Reduces `input` along the dimensions given in `axis`. Unless
-// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
-// `axis`. If `keep_dims` is true, the reduced dimensions are
-// retained with length 1.
+// When run, reports an `InvalidArgument` error if `tensor` has any values
+// that are not a number (NaN) or infinity (Inf). Otherwise, passes `tensor` as-is.
//
// Arguments:
-// input: The tensor to reduce.
-// axis: The dimensions to reduce. Must be in the range
-// `[-rank(input), rank(input))`.
//
-// Returns The reduced tensor.
-func Mean(scope *Scope, input tf.Output, axis tf.Output, optional ...MeanAttr) (output tf.Output) {
+// message: Prefix of the error message.
+func CheckNumerics(scope *Scope, tensor tf.Output, message string) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
+ attrs := map[string]interface{}{"message": message}
opspec := tf.OpSpec{
- Type: "Mean",
+ Type: "CheckNumerics",
Input: []tf.Input{
- input, axis,
+ tensor,
},
Attrs: attrs,
}
@@ -26571,46 +26358,62 @@ func Mean(scope *Scope, input tf.Output, axis tf.Output, optional ...MeanAttr) (
return op.Output(0)
}
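A minimal sketch (assumed imports as above; helper name illustrative):

```go
func checkNumericsSketch(s *op.Scope) tf.Output {
	x := op.Const(s, []float32{1, 2, 3})
	// Passes x through unchanged, but fails at run time with an
	// InvalidArgument error prefixed by the message if x contains
	// any NaN or Inf values.
	return op.CheckNumerics(s, x, "activations check: ")
}
```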
-// Returns which elements of x are finite.
+// Shuffle dimensions of x according to a permutation and conjugate the result.
//
-// @compatibility(numpy)
-// Equivalent to np.isfinite
-// @end_compatibility
-func IsFinite(scope *Scope, x tf.Output) (y tf.Output) {
+// The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
+// `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
+// `y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])`
+func ConjugateTranspose(scope *Scope, x tf.Output, perm tf.Output) (y tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "IsFinite",
+ Type: "ConjugateTranspose",
Input: []tf.Input{
- x,
+ x, perm,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// ArgMaxAttr is an optional argument to ArgMax.
-type ArgMaxAttr func(optionalAttr)
+// UniqueV2Attr is an optional argument to UniqueV2.
+type UniqueV2Attr func(optionalAttr)
-// ArgMaxOutputType sets the optional output_type attribute to value.
-// If not specified, defaults to DT_INT64
-func ArgMaxOutputType(value tf.DataType) ArgMaxAttr {
+// UniqueV2OutIdx sets the optional out_idx attribute to value.
+// If not specified, defaults to DT_INT32
+func UniqueV2OutIdx(value tf.DataType) UniqueV2Attr {
return func(m optionalAttr) {
- m["output_type"] = value
+ m["out_idx"] = value
}
}
-// Returns the index with the largest value across dimensions of a tensor.
+// Finds unique elements in a 1-D tensor.
//
-// Note that in case of ties the identity of the return value is not guaranteed.
+// This operation returns a tensor `y` containing all of the unique elements of `x`
+// sorted in the same order that they occur in `x`. This operation also returns a
+// tensor `idx` the same size as `x` that contains the index of each value of `x`
+// in the unique output `y`. In other words:
+//
+// `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
+//
+// For example:
+//
+// ```
+// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
+// y, idx = unique(x)
+// y ==> [1, 2, 4, 7, 8]
+// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
+// ```
//
// Arguments:
+// x: A `Tensor`.
+// axis: A `Tensor` of type `int64` (default: 0). The axis of the Tensor to
+// find the unique elements.
//
-// dimension: int32 or int64, must be in the range `[-rank(input), rank(input))`.
-// Describes which dimension of the input Tensor to reduce across. For vectors,
-// use dimension = 0.
-func ArgMax(scope *Scope, input tf.Output, dimension tf.Output, optional ...ArgMaxAttr) (output tf.Output) {
+// Returns A `Tensor` containing the unique elements along the `axis` of `Tensor` x, and a
+// 1-D Tensor of the same type as x that contains the index of each
+// value of x in the output y.
+func UniqueV2(scope *Scope, x tf.Output, axis tf.Output, optional ...UniqueV2Attr) (y tf.Output, idx tf.Output) {
if scope.Err() != nil {
return
}
@@ -26619,83 +26422,112 @@ func ArgMax(scope *Scope, input tf.Output, dimension tf.Output, optional ...ArgM
a(attrs)
}
opspec := tf.OpSpec{
- Type: "ArgMax",
+ Type: "UniqueV2",
Input: []tf.Input{
- input, dimension,
+ x, axis,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1)
}
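A sketch reproducing the doc example; passing the axis as a one-element int64 vector is an assumption about the accepted shape (helper name illustrative):

```go
func uniqueV2Sketch(s *op.Scope) (y, idx tf.Output) {
	x := op.Const(s, []int32{1, 1, 2, 4, 4, 4, 7, 8, 8})
	axis := op.Const(s, []int64{0})
	// y -> [1 2 4 7 8]; idx -> [0 0 1 2 2 2 3 4 4].
	return op.UniqueV2(s, x, axis)
}
```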
-// Computes the sum along segments of a tensor.
-//
-// Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
-// segments.
-//
-// Computes a tensor such that
-// \\(output_i = \sum_j data_j\\) where sum is over `j` such
-// that `segment_ids[j] == i`.
+// Return a slice from 'input'.
//
-// If the sum is empty for a given segment ID `i`, `output[i] = 0`.
+// The output tensor is a tensor with dimensions described by 'size'
+// whose values are extracted from 'input' starting at the offsets in
+// 'begin'.
//
-// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
-// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentSum.png" alt>
-// </div>
+// *Requirements*:
+// 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n)
//
// Arguments:
//
-// segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s
-// first dimension. Values should be sorted and can be repeated.
-//
-// Returns Has same shape as data, except for dimension 0 which
-// has size `k`, the number of segments.
-func SegmentSum(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output) {
+// begin: begin[i] specifies the offset into the 'i'th dimension of
+// 'input' to slice from.
+// size: size[i] specifies the number of elements of the 'i'th dimension
+// of 'input' to slice. If size[i] is -1, all remaining elements in dimension
+// i are included in the slice (i.e. this is equivalent to setting
+// size[i] = input.dim_size(i) - begin[i]).
+func Slice(scope *Scope, input tf.Output, begin tf.Output, size tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "SegmentSum",
+ Type: "Slice",
Input: []tf.Input{
- data, segment_ids,
+ input, begin, size,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
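A short sketch, including the -1 convention described above (assumed imports; helper name illustrative):

```go
func sliceSketch(s *op.Scope) tf.Output {
	input := op.Const(s, [][]int32{{1, 2, 3}, {4, 5, 6}})
	begin := op.Const(s, []int64{0, 1})
	// size[1] == -1 keeps all remaining elements of dimension 1,
	// so the result is [[2 3] [5 6]].
	size := op.Const(s, []int64{2, -1})
	return op.Slice(s, input, begin, size)
}
```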
-// Bucketizes 'input' based on 'boundaries'.
-//
-// For example, if the inputs are
-// boundaries = [0, 10, 100]
-// input = [[-5, 10000]
-// [150, 10]
-// [5, 100]]
-//
-// then the output will be
-// output = [[0, 3]
-// [3, 2]
-// [1, 3]]
-//
-// Arguments:
-// input: Any shape of Tensor contains with int or float type.
-// boundaries: A sorted list of floats gives the boundary of the buckets.
+// StridedSliceGradAttr is an optional argument to StridedSliceGrad.
+type StridedSliceGradAttr func(optionalAttr)
+
+// StridedSliceGradBeginMask sets the optional begin_mask attribute to value.
+// If not specified, defaults to 0
+func StridedSliceGradBeginMask(value int64) StridedSliceGradAttr {
+ return func(m optionalAttr) {
+ m["begin_mask"] = value
+ }
+}
+
+// StridedSliceGradEndMask sets the optional end_mask attribute to value.
+// If not specified, defaults to 0
+func StridedSliceGradEndMask(value int64) StridedSliceGradAttr {
+ return func(m optionalAttr) {
+ m["end_mask"] = value
+ }
+}
+
+// StridedSliceGradEllipsisMask sets the optional ellipsis_mask attribute to value.
+// If not specified, defaults to 0
+func StridedSliceGradEllipsisMask(value int64) StridedSliceGradAttr {
+ return func(m optionalAttr) {
+ m["ellipsis_mask"] = value
+ }
+}
+
+// StridedSliceGradNewAxisMask sets the optional new_axis_mask attribute to value.
+// If not specified, defaults to 0
+func StridedSliceGradNewAxisMask(value int64) StridedSliceGradAttr {
+ return func(m optionalAttr) {
+ m["new_axis_mask"] = value
+ }
+}
+
+// StridedSliceGradShrinkAxisMask sets the optional shrink_axis_mask attribute to value.
+// If not specified, defaults to 0
+func StridedSliceGradShrinkAxisMask(value int64) StridedSliceGradAttr {
+ return func(m optionalAttr) {
+ m["shrink_axis_mask"] = value
+ }
+}
+
+// Returns the gradient of `StridedSlice`.
//
-// Returns Same shape with 'input', each value of input replaced with bucket index.
+// Since `StridedSlice` cuts out pieces of its `input` which is of size
+// `shape`, its gradient will have the same shape (which is passed here
+// as `shape`). The gradient will be zero in any element that the slice
+// does not select.
//
-// @compatibility(numpy)
-// Equivalent to np.digitize.
-// @end_compatibility
-func Bucketize(scope *Scope, input tf.Output, boundaries []float32) (output tf.Output) {
+// Arguments are the same as `StridedSlice` with the exception that
+// `dy` is the input gradient to be propagated and `shape` is the
+// shape of `StridedSlice`'s `input`.
+func StridedSliceGrad(scope *Scope, shape tf.Output, begin tf.Output, end tf.Output, strides tf.Output, dy tf.Output, optional ...StridedSliceGradAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"boundaries": boundaries}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "Bucketize",
+ Type: "StridedSliceGrad",
Input: []tf.Input{
- input,
+ shape, begin, end, strides, dy,
},
Attrs: attrs,
}
@@ -26703,284 +26535,401 @@ func Bucketize(scope *Scope, input tf.Output, boundaries []float32) (output tf.O
return op.Output(0)
}
-// Reshapes a SparseTensor to represent values in a new dense shape.
-//
-// This operation has the same semantics as reshape on the represented dense
-// tensor. The `input_indices` are recomputed based on the requested `new_shape`.
-//
-// If one component of `new_shape` is the special value -1, the size of that
-// dimension is computed so that the total dense size remains constant. At
-// most one component of `new_shape` can be -1. The number of dense elements
-// implied by `new_shape` must be the same as the number of dense elements
-// originally implied by `input_shape`.
-//
-// Reshaping does not affect the order of values in the SparseTensor.
-//
-// If the input tensor has rank `R_in` and `N` non-empty values, and `new_shape`
-// has length `R_out`, then `input_indices` has shape `[N, R_in]`,
-// `input_shape` has length `R_in`, `output_indices` has shape `[N, R_out]`, and
-// `output_shape` has length `R_out`.
+// Returns the gradient of `Tile`.
//
-// Arguments:
-// input_indices: 2-D. `N x R_in` matrix with the indices of non-empty values in a
-// SparseTensor.
-// input_shape: 1-D. `R_in` vector with the input SparseTensor's dense shape.
-// new_shape: 1-D. `R_out` vector with the requested new dense shape.
+// DEPRECATED at GraphDef version 3: TileGrad has been replaced with reduce_sum
//
-// Returns 2-D. `N x R_out` matrix with the updated indices of non-empty
-// values in the output SparseTensor.1-D. `R_out` vector with the full dense shape of the output
-// SparseTensor. This is the same as `new_shape` but with any -1 dimensions
-// filled in.
-func SparseReshape(scope *Scope, input_indices tf.Output, input_shape tf.Output, new_shape tf.Output) (output_indices tf.Output, output_shape tf.Output) {
+// Since `Tile` takes an input and repeats the input `multiples` times
+// along each dimension, `TileGrad` takes in `multiples` and aggregates
+// each repeated tile of `input` into `output`.
+func TileGrad(scope *Scope, input tf.Output, multiples tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "SparseReshape",
+ Type: "TileGrad",
Input: []tf.Input{
- input_indices, input_shape, new_shape,
+ input, multiples,
},
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1)
+ return op.Output(0)
}
-// Computes the product along segments of a tensor.
-//
-// Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
-// segments.
+// DataFormatDimMapAttr is an optional argument to DataFormatDimMap.
+type DataFormatDimMapAttr func(optionalAttr)
+
+// DataFormatDimMapSrcFormat sets the optional src_format attribute to value.
//
-// Computes a tensor such that
-// \\(output_i = \prod_j data_j\\) where the product is over `j` such
-// that `segment_ids[j] == i`.
+// value: source data format.
+// If not specified, defaults to "NHWC"
+func DataFormatDimMapSrcFormat(value string) DataFormatDimMapAttr {
+ return func(m optionalAttr) {
+ m["src_format"] = value
+ }
+}
+
+// DataFormatDimMapDstFormat sets the optional dst_format attribute to value.
//
-// If the product is empty for a given segment ID `i`, `output[i] = 1`.
+// value: destination data format.
+// If not specified, defaults to "NCHW"
+func DataFormatDimMapDstFormat(value string) DataFormatDimMapAttr {
+ return func(m optionalAttr) {
+ m["dst_format"] = value
+ }
+}
+
+// Returns the dimension index in the destination data format given the one in the source data format.
//
-// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
-// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentProd.png" alt>
-// </div>
//
// Arguments:
+// x: A Tensor with each element as a dimension index in source data format.
+// Must be in the range [-4, 4).
//
-// segment_ids: A 1-D tensor whose rank is equal to the rank of `data`'s
-// first dimension. Values should be sorted and can be repeated.
-//
-// Returns Has same shape as data, except for dimension 0 which
-// has size `k`, the number of segments.
-func SegmentProd(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output) {
+// Returns A Tensor with each element as a dimension index in destination data format.
+func DataFormatDimMap(scope *Scope, x tf.Output, optional ...DataFormatDimMapAttr) (y tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "SegmentProd",
+ Type: "DataFormatDimMap",
Input: []tf.Input{
- data, segment_ids,
+ x,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
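A sketch mapping all four NHWC dimension indices at once; the attrs shown are also the defaults (assumed imports; helper name illustrative):

```go
func dataFormatDimMapSketch(s *op.Scope) tf.Output {
	// NHWC indices [0 1 2 3] (N, H, W, C) map to NCHW positions
	// [0 2 3 1].
	x := op.Const(s, []int32{0, 1, 2, 3})
	return op.DataFormatDimMap(s, x,
		op.DataFormatDimMapSrcFormat("NHWC"),
		op.DataFormatDimMapDstFormat("NCHW"))
}
```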
-// Computes the sum along segments of a tensor.
-//
-// Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
-// segments.
-//
-// Computes a tensor such that
-// `(output[i] = sum_{j...} data[j...]` where the sum is over tuples `j...` such
-// that `segment_ids[j...] == i`. Unlike `SegmentSum`, `segment_ids`
-// need not be sorted and need not cover all values in the full
-// range of valid values.
-//
-// If the sum is empty for a given segment ID `i`, `output[i] = 0`.
-// If the given segment ID `i` is negative, the value is dropped and will not be
-// added to the sum of the segment.
-//
-// `num_segments` should equal the number of distinct segment IDs.
-//
-// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
-// <img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentSum.png" alt>
-// </div>
-//
-// Arguments:
-//
-// segment_ids: A tensor whose shape is a prefix of `data.shape`.
-//
+// Return the shape of s0 op s1 with broadcast.
//
-// Returns Has same shape as data, except for the first `segment_ids.rank`
-// dimensions, which are replaced with a single dimension which has size
-// `num_segments`.
-func UnsortedSegmentSum(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
+// Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the
+// broadcasted shape. `s0`, `s1` and `r0` are all integer vectors.
+func BroadcastArgs(scope *Scope, s0 tf.Output, s1 tf.Output) (r0 tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "UnsortedSegmentSum",
+ Type: "BroadcastArgs",
Input: []tf.Input{
- data, segment_ids, num_segments,
+ s0, s1,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
-// Computes hyperbolic sine of x element-wise.
-func Sinh(scope *Scope, x tf.Output) (y tf.Output) {
+// Return the reduction indices for computing gradients of s0 op s1 with broadcast.
+//
+// This is typically used by gradient computations for a broadcasting operation.
+func BroadcastGradientArgs(scope *Scope, s0 tf.Output, s1 tf.Output) (r0 tf.Output, r1 tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "Sinh",
+ Type: "BroadcastGradientArgs",
Input: []tf.Input{
- x,
+ s0, s1,
},
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ return op.Output(0), op.Output(1)
}
-// Computes the sum along sparse segments of a tensor.
+// Pads a tensor with mirrored values.
//
-// Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
-// segments.
+// This operation pads a `input` with mirrored values according to the `paddings`
+// you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is
+// the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
+// how many values to add before the contents of `input` in that dimension, and
+// `paddings[D, 1]` indicates how many values to add after the contents of `input`
+// in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater
+// than `input.dim_size(D)` (or `input.dim_size(D) - 1`) if `copy_border` is true
+// (if false, respectively).
//
-// Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first
-// dimension, selecting a subset of dimension 0, specified by `indices`.
+// The padded size of each dimension D of the output is:
+//
+// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
//
// For example:
//
-// ```python
-// c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
+// ```
+// # 't' is [[1, 2, 3], [4, 5, 6]].
+// # 'paddings' is [[1, 1], [2, 2]].
+// # 'mode' is SYMMETRIC.
+// # rank of 't' is 2.
+// pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]
+// [2, 1, 1, 2, 3, 3, 2]
+// [5, 4, 4, 5, 6, 6, 5]
+// [5, 4, 4, 5, 6, 6, 5]]
+// ```
//
-// # Select two rows, one segment.
-// tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
-// # => [[0 0 0 0]]
+// Arguments:
+// input: The input tensor to be padded.
+// paddings: A two-column matrix specifying the padding sizes. The number of
+// rows must be the same as the rank of `input`.
+// mode: Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions
+// do not include the borders, while in symmetric mode the padded regions
+// do include the borders. For example, if `input` is `[1, 2, 3]` and `paddings`
+// is `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and
+// it is `[1, 2, 3, 3, 2]` in symmetric mode.
//
-// # Select two rows, two segment.
-// tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
-// # => [[ 1 2 3 4]
-// # [-1 -2 -3 -4]]
+// Returns The padded tensor.
+func MirrorPad(scope *Scope, input tf.Output, paddings tf.Output, mode string) (output tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{"mode": mode}
+ opspec := tf.OpSpec{
+ Type: "MirrorPad",
+ Input: []tf.Input{
+ input, paddings,
+ },
+ Attrs: attrs,
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
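A sketch reproducing the SYMMETRIC example above (assumed imports; helper name illustrative):

```go
func mirrorPadSketch(s *op.Scope) tf.Output {
	t := op.Const(s, [][]int32{{1, 2, 3}, {4, 5, 6}})
	paddings := op.Const(s, [][]int32{{1, 1}, {2, 2}})
	// SYMMETRIC includes the border values, yielding the 4x7 result
	// shown in the doc comment.
	return op.MirrorPad(s, t, paddings, "SYMMETRIC")
}
```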
+
+// A placeholder op for a value that will be fed into the computation.
//
-// # Select all rows, two segments.
-// tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
-// # => [[0 0 0 0]
-// # [5 6 7 8]]
+// DEPRECATED at GraphDef version 23: Placeholder now behaves the same as PlaceholderV2.
//
-// # Which is equivalent to:
-// tf.segment_sum(c, tf.constant([0, 0, 1]))
-// ```
+// N.B. This operation will fail with an error if it is executed. It is
+// intended as a way to represent a value that will always be fed, and to
+// provide attrs that enable the fed value to be checked at runtime.
//
// Arguments:
+// dtype: The type of elements in the tensor.
+// shape: The shape of the tensor. The shape can be any partially-specified
+// shape. To be unconstrained, pass in a shape with unknown rank.
//
-// indices: A 1-D tensor. Has same rank as `segment_ids`.
-// segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
+// Returns A placeholder tensor that must be replaced using the feed mechanism.
+func PlaceholderV2(scope *Scope, dtype tf.DataType, shape tf.Shape) (output tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{"dtype": dtype, "shape": shape}
+ opspec := tf.OpSpec{
+ Type: "PlaceholderV2",
+
+ Attrs: attrs,
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
+// ResourceApplyAdadeltaAttr is an optional argument to ResourceApplyAdadelta.
+type ResourceApplyAdadeltaAttr func(optionalAttr)
+
+// ResourceApplyAdadeltaUseLocking sets the optional use_locking attribute to value.
//
-// Returns Has same shape as data, except for dimension 0 which
-// has size `k`, the number of segments.
-func SparseSegmentSum(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output) (output tf.Output) {
+// value: If True, updating of the var, accum and update_accum tensors will be protected by
+// a lock; otherwise the behavior is undefined, but may exhibit less contention.
+// If not specified, defaults to false
+func ResourceApplyAdadeltaUseLocking(value bool) ResourceApplyAdadeltaAttr {
+ return func(m optionalAttr) {
+ m["use_locking"] = value
+ }
+}
+
+// Update '*var' according to the adadelta scheme.
+//
+// accum = rho * accum + (1 - rho) * grad.square();
+// update = (update_accum + epsilon).sqrt() * (accum + epsilon).rsqrt() * grad;
+// update_accum = rho * update_accum + (1 - rho) * update.square();
+// var -= update;
+//
+// Arguments:
+// var_: Should be from a Variable().
+// accum: Should be from a Variable().
+// accum_update: Should be from a Variable().
+// lr: Scaling factor. Must be a scalar.
+// rho: Decay factor. Must be a scalar.
+// epsilon: Constant factor. Must be a scalar.
+// grad: The gradient.
+//
+// Returns the created operation.
+func ResourceApplyAdadelta(scope *Scope, var_ tf.Output, accum tf.Output, accum_update tf.Output, lr tf.Output, rho tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyAdadeltaAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "SparseSegmentSum",
+ Type: "ResourceApplyAdadelta",
Input: []tf.Input{
- data, indices, segment_ids,
+ var_, accum, accum_update, lr, rho, epsilon, grad,
},
+ Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
-// Counts the number of occurrences of each value in an integer array.
+// SqueezeAttr is an optional argument to Squeeze.
+type SqueezeAttr func(optionalAttr)
+
+// SqueezeAxis sets the optional axis attribute to value.
//
-// Outputs a vector with length `size` and the same dtype as `weights`. If
-// `weights` are empty, then index `i` stores the number of times the value `i` is
-// counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of
-// the value in `weights` at each index where the corresponding value in `arr` is
-// `i`.
+// value: If specified, only squeezes the dimensions listed. The dimension
+// index starts at 0. It is an error to squeeze a dimension that is not 1. Must
+// be in the range `[-rank(input), rank(input))`.
+// If not specified, defaults to <>
//
-// Values in `arr` outside of the range [0, size) are ignored.
+// REQUIRES: len(value) >= 0
+func SqueezeAxis(value []int64) SqueezeAttr {
+ return func(m optionalAttr) {
+ m["squeeze_dims"] = value
+ }
+}
+
+// Removes dimensions of size 1 from the shape of a tensor.
+//
+// Given a tensor `input`, this operation returns a tensor of the same type with
+// all dimensions of size 1 removed. If you don't want to remove all size 1
+// dimensions, you can remove specific size 1 dimensions by specifying
+// `axis`.
+//
+// For example:
+//
+// ```
+// # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
+// shape(squeeze(t)) ==> [2, 3]
+// ```
+//
+// Or, to remove specific size 1 dimensions:
+//
+// ```
+// # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
+// shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
+// ```
//
// Arguments:
-// arr: int32 `Tensor`.
-// size: non-negative int32 scalar `Tensor`.
-// weights: is an int32, int64, float32, or float64 `Tensor` with the same
-// shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights
-// equal to 1.
+// input: The `input` to squeeze.
//
-// Returns 1D `Tensor` with length equal to `size`. The counts or summed weights for
-// each value in the range [0, size).
-func Bincount(scope *Scope, arr tf.Output, size tf.Output, weights tf.Output) (bins tf.Output) {
+// Returns Contains the same data as `input`, but has one or more dimensions of
+// size 1 removed.
+func Squeeze(scope *Scope, input tf.Output, optional ...SqueezeAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "Bincount",
+ Type: "Squeeze",
Input: []tf.Input{
- arr, size, weights,
+ input,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
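A minimal sketch of squeezing one specific axis (assumed imports; helper name illustrative):

```go
func squeezeSketch(s *op.Scope) tf.Output {
	// t has shape [1, 2, 1]; squeezing only axis 2 yields shape [1, 2],
	// leaving the leading size-1 dimension intact.
	t := op.Const(s, [][][]float32{{{1}, {2}}})
	return op.Squeeze(s, t, op.SqueezeAxis([]int64{2}))
}
```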
-// BatchToSpace for 4-D tensors of type T.
-//
-// This is a legacy version of the more general BatchToSpaceND.
+// SpaceToBatch for N-D tensors of type T.
//
-// Rearranges (permutes) data from batch into blocks of spatial data, followed by
-// cropping. This is the reverse transformation of SpaceToBatch. More specifically,
-// this op outputs a copy of the input tensor where values from the `batch`
-// dimension are moved in spatial blocks to the `height` and `width` dimensions,
-// followed by cropping along the `height` and `width` dimensions.
+// This operation divides "spatial" dimensions `[1, ..., M]` of the input into a
+// grid of blocks of shape `block_shape`, and interleaves these blocks with the
+// "batch" dimension (0) such that in the output, the spatial dimensions
+// `[1, ..., M]` correspond to the position within the grid, and the batch
+// dimension combines both the position within a spatial block and the original
+// batch position. Prior to division into blocks, the spatial dimensions of the
+// input are optionally zero padded according to `paddings`. See below for a
+// precise description.
//
// Arguments:
-// input: 4-D tensor with shape
-// `[batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
-// depth]`. Note that the batch size of the input tensor must be divisible by
-// `block_size * block_size`.
-// crops: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
-// how many elements to crop from the intermediate result across the spatial
-// dimensions as follows:
+// input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
+// where spatial_shape has `M` dimensions.
+// block_shape: 1-D with shape `[M]`, all values must be >= 1.
+// paddings: 2-D with shape `[M, 2]`, all values must be >= 0.
+// `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension
+// `i + 1`, which corresponds to spatial dimension `i`. It is required that
+// `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.
//
-// crops = [[crop_top, crop_bottom], [crop_left, crop_right]]
+// This operation is equivalent to the following steps:
//
+// 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the
+// input according to `paddings` to produce `padded` of shape `padded_shape`.
//
-// Returns 4-D with shape `[batch, height, width, depth]`, where:
+// 2. Reshape `padded` to `reshaped_padded` of shape:
//
-// height = height_pad - crop_top - crop_bottom
-// width = width_pad - crop_left - crop_right
+// [batch] +
+// [padded_shape[1] / block_shape[0],
+// block_shape[0],
+// ...,
+// padded_shape[M] / block_shape[M-1],
+// block_shape[M-1]] +
+// remaining_shape
//
-// The attr `block_size` must be greater than one. It indicates the block size.
+// 3. Permute dimensions of `reshaped_padded` to produce
+// `permuted_reshaped_padded` of shape:
+//
+// block_shape +
+// [batch] +
+// [padded_shape[1] / block_shape[0],
+// ...,
+// padded_shape[M] / block_shape[M-1]] +
+// remaining_shape
+//
+// 4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch
+// dimension, producing an output tensor of shape:
+//
+// [batch * prod(block_shape)] +
+// [padded_shape[1] / block_shape[0],
+// ...,
+// padded_shape[M] / block_shape[M-1]] +
+// remaining_shape
//
// Some examples:
//
-// (1) For the following input of shape `[4, 1, 1, 1]` and block_size of 2:
+// (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and
+// `paddings = [[0, 0], [0, 0]]`:
+//
+// ```
+// x = [[[[1], [2]], [[3], [4]]]]
+// ```
+//
+// The output tensor has shape `[4, 1, 1, 1]` and value:
//
// ```
// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
// ```
//
-// The output tensor has shape `[1, 2, 2, 1]` and value:
+// (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and
+// `paddings = [[0, 0], [0, 0]]`:
//
// ```
-// x = [[[[1], [2]], [[3], [4]]]]
+// x = [[[[1, 2, 3], [4, 5, 6]],
+// [[7, 8, 9], [10, 11, 12]]]]
// ```
//
-// (2) For the following input of shape `[4, 1, 1, 3]` and block_size of 2:
+// The output tensor has shape `[4, 1, 1, 3]` and value:
//
// ```
// [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
// ```
//
-// The output tensor has shape `[1, 2, 2, 3]` and value:
+// (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and
+// `paddings = [[0, 0], [0, 0]]`:
//
// ```
-// x = [[[[1, 2, 3], [4, 5, 6]],
-// [[7, 8, 9], [10, 11, 12]]]]
+// x = [[[[1], [2], [3], [4]],
+// [[5], [6], [7], [8]],
+// [[9], [10], [11], [12]],
+// [[13], [14], [15], [16]]]]
// ```
//
-// (3) For the following input of shape `[4, 2, 2, 1]` and block_size of 2:
+// The output tensor has shape `[4, 2, 2, 1]` and value:
//
// ```
// x = [[[[1], [3]], [[9], [11]]],
@@ -26989,142 +26938,133 @@ func Bincount(scope *Scope, arr tf.Output, size tf.Output, weights tf.Output) (b
// [[[6], [8]], [[14], [16]]]]
// ```
//
-// The output tensor has shape `[1, 4, 4, 1]` and value:
+// (4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and
+// paddings = `[[0, 0], [2, 0]]`:
//
// ```
-// x = [[[1], [2], [3], [4]],
-// [[5], [6], [7], [8]],
-// [[9], [10], [11], [12]],
-// [[13], [14], [15], [16]]]
+// x = [[[[1], [2], [3], [4]],
+// [[5], [6], [7], [8]]],
+// [[[9], [10], [11], [12]],
+// [[13], [14], [15], [16]]]]
// ```
//
-// (4) For the following input of shape `[8, 1, 2, 1]` and block_size of 2:
+// The output tensor has shape `[8, 1, 3, 1]` and value:
//
// ```
-// x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
-// [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
+// x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
+// [[[0], [2], [4]]], [[[0], [10], [12]]],
+// [[[0], [5], [7]]], [[[0], [13], [15]]],
+// [[[0], [6], [8]]], [[[0], [14], [16]]]]
// ```
//
-// The output tensor has shape `[2, 2, 4, 1]` and value:
-//
-// ```
-// x = [[[[1], [3]], [[5], [7]]],
-// [[[2], [4]], [[10], [12]]],
-// [[[5], [7]], [[13], [15]]],
-// [[[6], [8]], [[14], [16]]]]
-// ```
-func BatchToSpace(scope *Scope, input tf.Output, crops tf.Output, block_size int64) (output tf.Output) {
+// Among others, this operation is useful for reducing atrous convolution into
+// regular convolution.
+func SpaceToBatchND(scope *Scope, input tf.Output, block_shape tf.Output, paddings tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"block_size": block_size}
opspec := tf.OpSpec{
- Type: "BatchToSpace",
+ Type: "SpaceToBatchND",
Input: []tf.Input{
- input, crops,
+ input, block_shape, paddings,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
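A sketch of example (1) above (assumed imports; helper name illustrative):

```go
func spaceToBatchNDSketch(s *op.Scope) tf.Output {
	// Input shape [1, 2, 2, 1] with 2x2 blocks and no padding
	// becomes output shape [4, 1, 1, 1].
	x := op.Const(s, [][][][]int32{{{{1}, {2}}, {{3}, {4}}}})
	blockShape := op.Const(s, []int64{2, 2})
	paddings := op.Const(s, [][]int64{{0, 0}, {0, 0}})
	return op.SpaceToBatchND(s, x, blockShape, paddings)
}
```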
-// SparseToDenseAttr is an optional argument to SparseToDense.
-type SparseToDenseAttr func(optionalAttr)
+// QuantizeAndDequantizeV2Attr is an optional argument to QuantizeAndDequantizeV2.
+type QuantizeAndDequantizeV2Attr func(optionalAttr)
-// SparseToDenseValidateIndices sets the optional validate_indices attribute to value.
+// QuantizeAndDequantizeV2SignedInput sets the optional signed_input attribute to value.
//
-// value: If true, indices are checked to make sure they are sorted in
-// lexicographic order and that there are no repeats.
+// value: If the quantization is signed or unsigned.
// If not specified, defaults to true
-func SparseToDenseValidateIndices(value bool) SparseToDenseAttr {
+func QuantizeAndDequantizeV2SignedInput(value bool) QuantizeAndDequantizeV2Attr {
return func(m optionalAttr) {
- m["validate_indices"] = value
+ m["signed_input"] = value
}
}
-// Converts a sparse representation into a dense tensor.
-//
-// Builds an array `dense` with shape `output_shape` such that
-//
-// ```
-// # If sparse_indices is scalar
-// dense[i] = (i == sparse_indices ? sparse_values : default_value)
-//
-// # If sparse_indices is a vector, then for each i
-// dense[sparse_indices[i]] = sparse_values[i]
-//
-// # If sparse_indices is an n by d matrix, then for each i in [0, n)
-// dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
-// ```
-//
-// All other values in `dense` are set to `default_value`. If `sparse_values` is a
-// scalar, all sparse indices are set to this single value.
-//
-// Indices should be sorted in lexicographic order, and indices must not
-// contain any repeats. If `validate_indices` is true, these properties
-// are checked during execution.
-//
-// Arguments:
-// sparse_indices: 0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete
-// index where `sparse_values[i]` will be placed.
-// output_shape: 1-D. Shape of the dense output tensor.
-// sparse_values: 1-D. Values corresponding to each row of `sparse_indices`,
-// or a scalar value to be used for all sparse indices.
-// default_value: Scalar value to set for indices not specified in
-// `sparse_indices`.
+// QuantizeAndDequantizeV2NumBits sets the optional num_bits attribute to value.
//
-// Returns Dense output tensor of shape `output_shape`.
-func SparseToDense(scope *Scope, sparse_indices tf.Output, output_shape tf.Output, sparse_values tf.Output, default_value tf.Output, optional ...SparseToDenseAttr) (dense tf.Output) {
- if scope.Err() != nil {
- return
- }
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
- opspec := tf.OpSpec{
- Type: "SparseToDense",
- Input: []tf.Input{
- sparse_indices, output_shape, sparse_values, default_value,
- },
- Attrs: attrs,
+// value: The bitwidth of the quantization.
+// If not specified, defaults to 8
+func QuantizeAndDequantizeV2NumBits(value int64) QuantizeAndDequantizeV2Attr {
+ return func(m optionalAttr) {
+ m["num_bits"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// NthElementAttr is an optional argument to NthElement.
-type NthElementAttr func(optionalAttr)
-
-// NthElementReverse sets the optional reverse attribute to value.
+// QuantizeAndDequantizeV2RangeGiven sets the optional range_given attribute to value.
//
-// value: When set to True, find the nth-largest value in the vector and vice
-// versa.
+// value: If the range is given or should be computed from the tensor.
// If not specified, defaults to false
-func NthElementReverse(value bool) NthElementAttr {
+func QuantizeAndDequantizeV2RangeGiven(value bool) QuantizeAndDequantizeV2Attr {
return func(m optionalAttr) {
- m["reverse"] = value
+ m["range_given"] = value
}
}
-// Finds values of the `n`-th order statistic for the last dimension.
+// Quantizes then dequantizes a tensor.
//
-// If the input is a vector (rank-1), finds the entries which is the nth-smallest
-// value in the vector and outputs their values as scalar tensor.
+// This op simulates the precision loss from the quantized forward pass by:
+// 1. Quantizing the tensor to fixed point numbers, which should match the target
+// quantization method when it is used in inference.
+// 2. Dequantizing it back to floating point numbers for the following ops, most
+// likely matmul.
//
-// For matrices (resp. higher rank input), computes the entries which is the
-// nth-smallest value in each row (resp. vector along the last dimension). Thus,
+// There are different ways to quantize. This version does not use the full range
+// of the output type, choosing to elide the lowest possible value for symmetry
+// (e.g., output range is -127 to 127, not -128 to 127 for signed 8 bit
+// quantization), so that 0.0 maps to 0.
//
-// values.shape = input.shape[:-1]
+// To perform this op, we first find the range of values in our tensor. The range
+// we use is always centered on 0, so we find m such that
//
-// Arguments:
-// input: 1-D or higher with last dimension at least `n+1`.
-// n: 0-D. Position of sorted vector to select along the last dimension (along
-// each row for matrices). Valid range of n is `[0, input.shape[:-1])`
+// 1. m = max(abs(input_min), abs(input_max)) if range_given is true,
+// 2. m = max(abs(min_elem(input)), abs(max_elem(input))) otherwise.
//
-// Returns The `n`-th order statistic along each last dimensional slice.
-func NthElement(scope *Scope, input tf.Output, n tf.Output, optional ...NthElementAttr) (values tf.Output) {
+// Our input tensor range is then [-m, m].
+//
+// Next, we choose our fixed-point quantization buckets, [min_fixed, max_fixed].
+// If signed_input is true, this is
+//
+// [min_fixed, max_fixed] =
+// [-((1 << (num_bits - 1)) - 1), (1 << (num_bits - 1)) - 1].
+//
+// Otherwise, if signed_input is false, the fixed-point range is
+//
+// [min_fixed, max_fixed] = [0, (1 << num_bits) - 1].
+//
+// From this we compute our scaling factor, s:
+//
+// s = (max_fixed - min_fixed) / (2 * m).
+//
+// Now we can quantize and dequantize the elements of our tensor. An element e
+// is transformed into e':
+//
+// e' = (e * s).round_to_nearest() / s.
+//
+// Note that we have a different number of buckets in the signed vs. unsigned
+// cases. For example, if num_bits == 8, we get 254 buckets in the signed case
+// vs. 255 in the unsigned case.
+//
+// For example, suppose num_bits = 8 and m = 1. Then
+//
+// [min_fixed, max_fixed] = [-127, 127], and
+// s = (127 + 127) / 2 = 127.
+//
+// Given the vector {-1, -0.5, 0, 0.3}, this is quantized to
+// {-127, -63, 0, 38}, and dequantized to {-1, -63.0/127, 0, 38.0/127}.
+//
+// Arguments:
+// input: Tensor to quantize and then dequantize.
+// input_min: If range_given, this is the min of the range, otherwise this input
+// will be ignored.
+// input_max: If range_given, this is the max of the range, otherwise this input
+// will be ignored.
+func QuantizeAndDequantizeV2(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, optional ...QuantizeAndDequantizeV2Attr) (output tf.Output) {
if scope.Err() != nil {
return
}
@@ -27133,9 +27073,9 @@ func NthElement(scope *Scope, input tf.Output, n tf.Output, optional ...NthEleme
a(attrs)
}
opspec := tf.OpSpec{
- Type: "NthElement",
+ Type: "QuantizeAndDequantizeV2",
Input: []tf.Input{
- input, n,
+ input, input_min, input_max,
},
Attrs: attrs,
}
@@ -27143,88 +27083,113 @@ func NthElement(scope *Scope, input tf.Output, n tf.Output, optional ...NthEleme
return op.Output(0)
}
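A sketch of the num_bits = 8, m = 1 example above (assumed imports; helper name illustrative):

```go
func quantizeDequantizeSketch(s *op.Scope) tf.Output {
	// With range [-1, 1] given and 8 bits, {-1, -0.5, 0, 0.3}
	// round-trips to approximately {-1, -63.0/127, 0, 38.0/127}.
	x := op.Const(s, []float32{-1, -0.5, 0, 0.3})
	inputMin := op.Const(s, float32(-1))
	inputMax := op.Const(s, float32(1))
	return op.QuantizeAndDequantizeV2(s, x, inputMin, inputMax,
		op.QuantizeAndDequantizeV2NumBits(8),
		op.QuantizeAndDequantizeV2RangeGiven(true))
}
```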
-// Computes asin of x element-wise.
-func Asin(scope *Scope, x tf.Output) (y tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "Asin",
- Input: []tf.Input{
- x,
- },
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// Computes the sum along sparse segments of a tensor.
+// SpaceToBatch for 4-D tensors of type T.
//
-// Like `SparseSegmentSum`, but allows missing ids in `segment_ids`. If an id is
-// missing, the `output` tensor at that position will be zeroed.
+// This is a legacy version of the more general SpaceToBatchND.
//
-// Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
-// segments.
+// Zero-pads and then rearranges (permutes) blocks of spatial data into batch.
+// More specifically, this op outputs a copy of the input tensor where values from
+// the `height` and `width` dimensions are moved to the `batch` dimension. After
+// the zero-padding, both `height` and `width` of the input must be divisible by the
+// block size.
//
-// For example:
+// Arguments:
+// input: 4-D with shape `[batch, height, width, depth]`.
+// paddings: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
+// the padding of the input with zeros across the spatial dimensions as follows:
//
-// ```python
-// c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
+// paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]
//
-// tf.sparse_segment_sum_with_num_segments(
-// c, tf.constant([0, 1]), tf.constant([0, 0]), num_segments=3)
-// # => [[0 0 0 0]
-// # [0 0 0 0]
-// # [0 0 0 0]]
+// The effective spatial dimensions of the zero-padded input tensor will be:
//
-// tf.sparse_segment_sum_with_num_segments(c,
-// tf.constant([0, 1]),
-// tf.constant([0, 2],
-// num_segments=4))
-// # => [[ 1 2 3 4]
-// # [ 0 0 0 0]
-// # [-1 -2 -3 -4]
-// # [ 0 0 0 0]]
+// height_pad = pad_top + height + pad_bottom
+// width_pad = pad_left + width + pad_right
+//
+// The attr `block_size` must be greater than one. It indicates the block size.
+//
+// * Non-overlapping blocks of size `block_size x block size` in the height and
+// width dimensions are rearranged into the batch dimension at each location.
+// * The batch of the output tensor is `batch * block_size * block_size`.
+// * Both height_pad and width_pad must be divisible by block_size.
+//
+// The shape of the output will be:
+//
+// [batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
+// depth]
+//
+// Some examples:
+//
+// (1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2:
+//
+// ```
+// x = [[[[1], [2]], [[3], [4]]]]
// ```
//
-// Arguments:
+// The output tensor has shape `[4, 1, 1, 1]` and value:
//
-// indices: A 1-D tensor. Has same rank as `segment_ids`.
-// segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
-// num_segments: Should equal the number of distinct segment IDs.
+// ```
+// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
+// ```
//
-// Returns Has same shape as data, except for dimension 0 which
-// has size `num_segments`.
-func SparseSegmentSumWithNumSegments(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "SparseSegmentSumWithNumSegments",
- Input: []tf.Input{
- data, indices, segment_ids, num_segments,
- },
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// Pop the element at the top of the stack.
+// (2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2:
//
-// Arguments:
-// handle: The handle to a stack.
-// elem_type: The type of the elem that is popped.
+// ```
+// x = [[[[1, 2, 3], [4, 5, 6]],
+// [[7, 8, 9], [10, 11, 12]]]]
+// ```
//
-// Returns The tensor that is popped from the top of the stack.
-func StackPopV2(scope *Scope, handle tf.Output, elem_type tf.DataType) (elem tf.Output) {
+// The output tensor has shape `[4, 1, 1, 3]` and value:
+//
+// ```
+// [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
+// ```
+//
+// (3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2:
+//
+// ```
+// x = [[[[1], [2], [3], [4]],
+// [[5], [6], [7], [8]],
+// [[9], [10], [11], [12]],
+// [[13], [14], [15], [16]]]]
+// ```
+//
+// The output tensor has shape `[4, 2, 2, 1]` and value:
+//
+// ```
+// x = [[[[1], [3]], [[9], [11]]],
+// [[[2], [4]], [[10], [12]]],
+// [[[5], [7]], [[13], [15]]],
+// [[[6], [8]], [[14], [16]]]]
+// ```
+//
+// (4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2:
+//
+// ```
+// x = [[[[1], [2], [3], [4]],
+// [[5], [6], [7], [8]]],
+// [[[9], [10], [11], [12]],
+// [[13], [14], [15], [16]]]]
+// ```
+//
+// The output tensor has shape `[8, 1, 2, 1]` and value:
+//
+// ```
+// x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
+// [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
+// ```
+//
+// Among others, this operation is useful for reducing atrous convolution into
+// regular convolution.
+//
+func SpaceToBatch(scope *Scope, input tf.Output, paddings tf.Output, block_size int64) (output tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"elem_type": elem_type}
+ attrs := map[string]interface{}{"block_size": block_size}
opspec := tf.OpSpec{
- Type: "StackPopV2",
+ Type: "SpaceToBatch",
Input: []tf.Input{
- handle,
+ input, paddings,
},
Attrs: attrs,
}
@@ -27232,259 +27197,434 @@ func StackPopV2(scope *Scope, handle tf.Output, elem_type tf.DataType) (elem tf.
return op.Output(0)
}
-// WholeFileReaderV2Attr is an optional argument to WholeFileReaderV2.
-type WholeFileReaderV2Attr func(optionalAttr)
+// UnpackAttr is an optional argument to Unpack.
+type UnpackAttr func(optionalAttr)
-// WholeFileReaderV2Container sets the optional container attribute to value.
+// UnpackAxis sets the optional axis attribute to value.
//
-// value: If non-empty, this reader is placed in the given container.
-// Otherwise, a default container is used.
-// If not specified, defaults to ""
-func WholeFileReaderV2Container(value string) WholeFileReaderV2Attr {
+// value: Dimension along which to unpack. Negative values wrap around, so the
+// valid range is `[-R, R)`.
+// If not specified, defaults to 0
+func UnpackAxis(value int64) UnpackAttr {
return func(m optionalAttr) {
- m["container"] = value
+ m["axis"] = value
}
}
-// WholeFileReaderV2SharedName sets the optional shared_name attribute to value.
+// Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors.
//
-// value: If non-empty, this reader is named in the given bucket
-// with this shared_name. Otherwise, the node name is used instead.
-// If not specified, defaults to ""
-func WholeFileReaderV2SharedName(value string) WholeFileReaderV2Attr {
- return func(m optionalAttr) {
- m["shared_name"] = value
- }
-}
-
-// A Reader that outputs the entire contents of a file as a value.
+// Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
+// For example, given a tensor of shape `(A, B, C, D)`;
//
-// To use, enqueue filenames in a Queue. The output of ReaderRead will
-// be a filename (key) and the contents of that file (value).
+// If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]`
+// and each tensor in `output` will have shape `(B, C, D)`. (Note that the
+// dimension unpacked along is gone, unlike `split`).
//
-// Returns The handle to reference the Reader.
-func WholeFileReaderV2(scope *Scope, optional ...WholeFileReaderV2Attr) (reader_handle tf.Output) {
+// If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]`
+// and each tensor in `output` will have shape `(A, C, D)`.
+// Etc.
+//
+// This is the opposite of `pack`.
+//
+// Arguments:
+// value: 1-D or higher, with `axis` dimension size equal to `num`.
+//
+//
+// Returns The list of tensors unpacked from `value`.
+func Unpack(scope *Scope, value tf.Output, num int64, optional ...UnpackAttr) (output []tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"num": num}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "WholeFileReaderV2",
-
+ Type: "Unpack",
+ Input: []tf.Input{
+ value,
+ },
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
+ if scope.Err() != nil {
+ return
+ }
+ var idx int
+ var err error
+ if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
+ scope.UpdateErr("Unpack", err)
+ return
+ }
+ return output
}
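+
+// A hedged usage sketch (assuming a scope `s` from op.NewScope(); the names
+// are illustrative only):
+//
+// ```
+// v := op.Const(s, [][]float32{{1, 2, 3}, {4, 5, 6}})           // shape (2, 3)
+// rows := op.Unpack(s, v, 2)                                    // 2 tensors of shape (3,)
+// cols := op.Unpack(s.SubScope("cols"), v, 3, op.UnpackAxis(1)) // 3 tensors of shape (2,)
+// ```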
-// Computes the mean along sparse segments of a tensor.
-//
-// Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
-// segments.
-//
-// Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first
-// dimension, selecting a subset of dimension 0, specified by `indices`.
+// Increments variable pointed to by 'resource' until it reaches 'limit'.
//
// Arguments:
+// resource: Should be from a scalar `Variable` node.
+// limit: If incrementing the resource would bring it above `limit`, the op
+// instead generates an 'OutOfRange' error.
//
-// indices: A 1-D tensor. Has same rank as `segment_ids`.
-// segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
//
-// Returns Has same shape as data, except for dimension 0 which
-// has size `k`, the number of segments.
-func SparseSegmentMean(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output) (output tf.Output) {
+// Returns A copy of the input before increment. If nothing else modifies the
+// input, the values produced will all be distinct.
+func ResourceCountUpTo(scope *Scope, resource tf.Output, limit int64, T tf.DataType) (output tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"limit": limit, "T": T}
opspec := tf.OpSpec{
- Type: "SparseSegmentMean",
+ Type: "ResourceCountUpTo",
Input: []tf.Input{
- data, indices, segment_ids,
+ resource,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
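+
+// A hedged usage sketch (assumes a scope `s`, and that the variable is given
+// an initial value, e.g. via AssignVariableOp, before the first session run):
+//
+// ```
+// v := op.VarHandleOp(s, tf.Int64, tf.ScalarShape())
+// next := op.ResourceCountUpTo(s, v, 100, tf.Int64) // 'OutOfRange' once the count passes 100
+// ```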
-// Computes the mean along sparse segments of a tensor.
-//
-// Like `SparseSegmentMean`, but allows missing ids in `segment_ids`. If an id is
-// missing, the `output` tensor at that position will be zeroed.
-//
-// Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
-// segments.
+// Delete the stack from its resource container.
//
// Arguments:
+// handle: The handle to a stack.
//
-// indices: A 1-D tensor. Has same rank as `segment_ids`.
-// segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
-// num_segments: Should equal the number of distinct segment IDs.
-//
-// Returns Has same shape as data, except for dimension 0 which has size
-// `num_segments`.
-func SparseSegmentMeanWithNumSegments(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
+// Returns the created operation.
+func StackCloseV2(scope *Scope, handle tf.Output) (o *tf.Operation) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "SparseSegmentMeanWithNumSegments",
+ Type: "StackCloseV2",
Input: []tf.Input{
- data, indices, segment_ids, num_segments,
+ handle,
},
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
-// Computes the sum along sparse segments of a tensor divided by the sqrt of N.
+// BatchToSpace for N-D tensors of type T.
//
-// N is the size of the segment being reduced.
+// This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape
+// `block_shape + [batch]`, interleaves these blocks back into the grid defined by
+// the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as
+// the input. The spatial dimensions of this intermediate result are then
+// optionally cropped according to `crops` to produce the output. This is the
+// reverse of SpaceToBatch. See below for a precise description.
//
-// Like `SparseSegmentSqrtN`, but allows missing ids in `segment_ids`. If an id is
-// missing, the `output` tensor at that position will be zeroed.
+// Arguments:
+// input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
+// where spatial_shape has M dimensions.
+// block_shape: 1-D with shape `[M]`, all values must be >= 1.
+// crops: 2-D with shape `[M, 2]`, all values must be >= 0.
+// `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input
+// dimension `i + 1`, which corresponds to spatial dimension `i`. It is
+// required that
+// `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.
//
-// Read @{$math_ops#segmentation$the section on segmentation} for an explanation of
-// segments.
+// This operation is equivalent to the following steps:
//
-// Arguments:
+// 1. Reshape `input` to `reshaped` of shape:
+// [block_shape[0], ..., block_shape[M-1],
+// batch / prod(block_shape),
+// input_shape[1], ..., input_shape[N-1]]
//
-// indices: A 1-D tensor. Has same rank as `segment_ids`.
-// segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
-// num_segments: Should equal the number of distinct segment IDs.
+// 2. Permute dimensions of `reshaped` to produce `permuted` of shape
+// [batch / prod(block_shape),
//
-// Returns Has same shape as data, except for dimension 0 which
-// has size `k`, the number of segments.
-func SparseSegmentSqrtNWithNumSegments(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "SparseSegmentSqrtNWithNumSegments",
- Input: []tf.Input{
- data, indices, segment_ids, num_segments,
- },
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// Reshapes a quantized tensor as per the Reshape op.
+// input_shape[1], block_shape[0],
+// ...,
+// input_shape[M], block_shape[M-1],
+//
+// input_shape[M+1], ..., input_shape[N-1]]
+//
+// 3. Reshape `permuted` to produce `reshaped_permuted` of shape
+// [batch / prod(block_shape),
+//
+// input_shape[1] * block_shape[0],
+// ...,
+// input_shape[M] * block_shape[M-1],
+//
+// input_shape[M+1],
+// ...,
+// input_shape[N-1]]
+//
+// 4. Crop the start and end of dimensions `[1, ..., M]` of
+// `reshaped_permuted` according to `crops` to produce the output of shape:
+// [batch / prod(block_shape),
+//
+// input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],
+// ...,
+// input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],
+//
+// input_shape[M+1], ..., input_shape[N-1]]
+//
+// Some examples:
//
+// (1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and
+// `crops = [[0, 0], [0, 0]]`:
+//
+// ```
+// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
// ```
//
-// Arguments:
+// The output tensor has shape `[1, 2, 2, 1]` and value:
//
-// shape: Defines the shape of the output tensor.
-// input_min: The minimum value of the input.
-// input_max: The maximum value of the input.
+// ```
+// x = [[[[1], [2]], [[3], [4]]]]
+// ```
//
-// Returns This value is copied from input_min.This value is copied from input_max.
-func QuantizedReshape(scope *Scope, tensor tf.Output, shape tf.Output, input_min tf.Output, input_max tf.Output) (output tf.Output, output_min tf.Output, output_max tf.Output) {
+// (2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and
+// `crops = [[0, 0], [0, 0]]`:
+//
+// ```
+// [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
+// ```
+//
+// The output tensor has shape `[1, 2, 2, 3]` and value:
+//
+// ```
+// x = [[[[1, 2, 3], [4, 5, 6]],
+// [[7, 8, 9], [10, 11, 12]]]]
+// ```
+//
+// (3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and
+// `crops = [[0, 0], [0, 0]]`:
+//
+// ```
+// x = [[[[1], [3]], [[9], [11]]],
+// [[[2], [4]], [[10], [12]]],
+// [[[5], [7]], [[13], [15]]],
+// [[[6], [8]], [[14], [16]]]]
+// ```
+//
+// The output tensor has shape `[1, 4, 4, 1]` and value:
+//
+// ```
+// x = [[[1], [2], [3], [4]],
+// [[5], [6], [7], [8]],
+// [[9], [10], [11], [12]],
+// [[13], [14], [15], [16]]]
+// ```
+//
+// (4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and
+// `crops = [[0, 0], [2, 0]]`:
+//
+// ```
+// x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
+// [[[0], [2], [4]]], [[[0], [10], [12]]],
+// [[[0], [5], [7]]], [[[0], [13], [15]]],
+// [[[0], [6], [8]]], [[[0], [14], [16]]]]
+// ```
+//
+// The output tensor has shape `[2, 2, 4, 1]` and value:
+//
+// ```
+// x = [[[[1], [2], [3], [4]],
+// [[5], [6], [7], [8]]],
+// [[[9], [10], [11], [12]],
+// [[13], [14], [15], [16]]]]
+// ```
+func BatchToSpaceND(scope *Scope, input tf.Output, block_shape tf.Output, crops tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "QuantizedReshape",
+ Type: "BatchToSpaceND",
Input: []tf.Input{
- tensor, shape, input_min, input_max,
+ input, block_shape, crops,
},
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
+ return op.Output(0)
}
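+
+// A hedged usage sketch mirroring example (1) above (scope `s` assumed):
+//
+// ```
+// x := op.Const(s, [][][][]int32{{{{1}}}, {{{2}}}, {{{3}}}, {{{4}}}}) // shape [4, 1, 1, 1]
+// blockShape := op.Const(s.SubScope("block_shape"), []int32{2, 2})
+// crops := op.Const(s.SubScope("crops"), [][]int32{{0, 0}, {0, 0}})
+// y := op.BatchToSpaceND(s, x, blockShape, crops) // y has shape [1, 2, 2, 1]
+// ```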
-// Computes gradients for SparseSegmentSqrtN.
-//
-// Returns tensor "output" with same shape as grad, except for dimension 0 whose
-// value is output_dim0.
+// Extract `patches` from `images` and put them in the "depth" output dimension.
//
// Arguments:
-// grad: gradient propagated to the SparseSegmentSqrtN op.
-// indices: indices passed to the corresponding SparseSegmentSqrtN op.
-// segment_ids: segment_ids passed to the corresponding SparseSegmentSqrtN op.
-// output_dim0: dimension 0 of "data" passed to SparseSegmentSqrtN op.
-func SparseSegmentSqrtNGrad(scope *Scope, grad tf.Output, indices tf.Output, segment_ids tf.Output, output_dim0 tf.Output) (output tf.Output) {
+// images: 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.
+// ksizes: The size of the sliding window for each dimension of `images`.
+// strides: 1-D of length 4. How far the centers of two consecutive patches are in
+// the images. Must be: `[1, stride_rows, stride_cols, 1]`.
+// rates: 1-D of length 4. Must be: `[1, rate_rows, rate_cols, 1]`. This is the
+// input stride, specifying how far two consecutive patch samples are in the
+// input. Equivalent to extracting patches with
+// `patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by
+// subsampling them spatially by a factor of `rates`. This is equivalent to
+// `rate` in dilated (a.k.a. Atrous) convolutions.
+// padding: The type of padding algorithm to use.
+//
+// We specify the size-related attributes as:
+//
+// ```python
+// ksizes = [1, ksize_rows, ksize_cols, 1]
+// strides = [1, strides_rows, strides_cols, 1]
+// rates = [1, rates_rows, rates_cols, 1]
+// ```
+//
+// Returns 4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows *
+// ksize_cols * depth]` containing image patches with size
+// `ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension. Note
+// `out_rows` and `out_cols` are the dimensions of the output patches.
+func ExtractImagePatches(scope *Scope, images tf.Output, ksizes []int64, strides []int64, rates []int64, padding string) (patches tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"ksizes": ksizes, "strides": strides, "rates": rates, "padding": padding}
opspec := tf.OpSpec{
- Type: "SparseSegmentSqrtNGrad",
+ Type: "ExtractImagePatches",
Input: []tf.Input{
- grad, indices, segment_ids, output_dim0,
+ images,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
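+
+// A hedged usage sketch (scope `s` and a 4-D float `images` tensor assumed):
+//
+// ```
+// patches := op.ExtractImagePatches(s, images,
+//     []int64{1, 3, 3, 1}, // ksizes: 3x3 windows
+//     []int64{1, 1, 1, 1}, // strides: dense sampling
+//     []int64{1, 1, 1, 1}, // rates: no dilation
+//     "SAME")
+// ```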
-// Creates a sequence of numbers.
-//
-// This operation creates a sequence of numbers that begins at `start` and
-// extends by increments of `delta` up to but not including `limit`.
+// Bitcasts a tensor from one type to another without copying data.
//
-// For example:
+// Given a tensor `input`, this operation returns a tensor that has the same buffer
+// data as `input` with datatype `type`.
//
-// ```
-// # 'start' is 3
-// # 'limit' is 18
-// # 'delta' is 3
-// tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
-// ```
+// If the input datatype `T` is larger than the output datatype `type` then the
+// shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)].
//
-// Arguments:
-// start: 0-D (scalar). First entry in the sequence.
-// limit: 0-D (scalar). Upper limit of sequence, exclusive.
-// delta: 0-D (scalar). Optional. Default is 1. Number that increments `start`.
+// If `T` is smaller than `type`, the operator requires that the rightmost
+// dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from
+// [..., sizeof(`type`)/sizeof(`T`)] to [...].
//
-// Returns 1-D.
-func Range(scope *Scope, start tf.Output, limit tf.Output, delta tf.Output) (output tf.Output) {
+// *NOTE*: Bitcast is implemented as a low-level cast, so machines with different
+// endian orderings will give different results.
+func Bitcast(scope *Scope, input tf.Output, type_ tf.DataType) (output tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{"type": type_}
opspec := tf.OpSpec{
- Type: "Range",
+ Type: "Bitcast",
Input: []tf.Input{
- start, limit, delta,
+ input,
},
+ Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
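+
+// A hedged usage sketch (scope `s` assumed). Since sizeof(int32) is four
+// times sizeof(uint8), the shape gains a trailing dimension of 4:
+//
+// ```
+// x := op.Const(s, []int32{1, 2})  // shape [2]
+// y := op.Bitcast(s, x, tf.Uint8)  // shape [2, 4]
+// ```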
-// AngleAttr is an optional argument to Angle.
-type AngleAttr func(optionalAttr)
+// OneHotAttr is an optional argument to OneHot.
+type OneHotAttr func(optionalAttr)
-// AngleTout sets the optional Tout attribute to value.
-// If not specified, defaults to DT_FLOAT
-func AngleTout(value tf.DataType) AngleAttr {
+// OneHotAxis sets the optional axis attribute to value.
+//
+// value: The axis to fill (default: -1, a new inner-most axis).
+// If not specified, defaults to -1
+func OneHotAxis(value int64) OneHotAttr {
return func(m optionalAttr) {
- m["Tout"] = value
+ m["axis"] = value
}
}
-// Returns the argument of a complex number.
+// Returns a one-hot tensor.
//
-// Given a tensor `input` of complex numbers, this operation returns a tensor of
-// type `float` that is the argument of each element in `input`. All elements in
-// `input` must be complex numbers of the form \\(a + bj\\), where *a*
-// is the real part and *b* is the imaginary part.
+// The locations represented by indices in `indices` take value `on_value`,
+// while all other locations take value `off_value`.
//
-// The argument returned by this operation is of the form \\(atan2(b, a)\\).
+// If the input `indices` is rank `N`, the output will have rank `N+1`.
+// The new axis is created at dimension `axis` (default: the new axis is
+// appended at the end).
//
-// For example:
+// If `indices` is a scalar the output shape will be a vector of length `depth`.
//
+// If `indices` is a vector of length `features`, the output shape will be:
// ```
-// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
-// tf.angle(input) ==> [2.0132, 1.056]
+// features x depth if axis == -1
+// depth x features if axis == 0
// ```
//
-// @compatibility(numpy)
-// Equivalent to np.angle.
-// @end_compatibility
-func Angle(scope *Scope, input tf.Output, optional ...AngleAttr) (output tf.Output) {
+// If `indices` is a matrix (batch) with shape `[batch, features]`,
+// the output shape will be:
+// ```
+// batch x features x depth if axis == -1
+// batch x depth x features if axis == 1
+// depth x batch x features if axis == 0
+// ```
+//
+//
+// Examples
+// =========
+//
+// Suppose that
+//
+// ```
+// indices = [0, 2, -1, 1]
+// depth = 3
+// on_value = 5.0
+// off_value = 0.0
+// axis = -1
+// ```
+//
+// Then output is `[4 x 3]`:
+//
+// ```output =
+// [5.0 0.0 0.0] // one_hot(0)
+// [0.0 0.0 5.0] // one_hot(2)
+// [0.0 0.0 0.0] // one_hot(-1)
+// [0.0 5.0 0.0] // one_hot(1)
+// ```
+//
+// Suppose that
+//
+// ```
+// indices = [0, 2, -1, 1]
+// depth = 3
+// on_value = 0.0
+// off_value = 3.0
+// axis = 0
+// ```
+//
+// Then output is `[3 x 4]`:
+//
+// ```output =
+// [0.0 3.0 3.0 3.0]
+// [3.0 3.0 3.0 0.0]
+// [3.0 3.0 3.0 3.0]
+// [3.0 0.0 3.0 3.0]
+// // ^ one_hot(0)
+// // ^ one_hot(2)
+// // ^ one_hot(-1)
+// // ^ one_hot(1)
+// ```
+// Suppose that
+//
+// ```
+// indices = [[0, 2], [1, -1]]
+// depth = 3
+// on_value = 1.0
+// off_value = 0.0
+// axis = -1
+// ```
+//
+// Then output is `[2 x 2 x 3]`:
+//
+// ```output =
+// [
+// [1.0, 0.0, 0.0] // one_hot(0)
+// [0.0, 0.0, 1.0] // one_hot(2)
+// ][
+// [0.0, 1.0, 0.0] // one_hot(1)
+// [0.0, 0.0, 0.0] // one_hot(-1)
+// ]```
+//
+// Arguments:
+// indices: A tensor of indices.
+// depth: A scalar defining the depth of the one hot dimension.
+// on_value: A scalar defining the value to fill in output when `indices[j] = i`.
+// off_value: A scalar defining the value to fill in output when `indices[j] != i`.
+//
+// Returns The one-hot tensor.
+func OneHot(scope *Scope, indices tf.Output, depth tf.Output, on_value tf.Output, off_value tf.Output, optional ...OneHotAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
@@ -27493,9 +27633,9 @@ func Angle(scope *Scope, input tf.Output, optional ...AngleAttr) (output tf.Outp
a(attrs)
}
opspec := tf.OpSpec{
- Type: "Angle",
+ Type: "OneHot",
Input: []tf.Input{
- input,
+ indices, depth, on_value, off_value,
},
Attrs: attrs,
}
@@ -27503,197 +27643,185 @@ func Angle(scope *Scope, input tf.Output, optional ...AngleAttr) (output tf.Outp
return op.Output(0)
}
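+
+// A hedged usage sketch matching the first example above (scope `s` assumed):
+//
+// ```
+// indices := op.Const(s, []int64{0, 2, -1, 1})
+// depth := op.Const(s.SubScope("depth"), int32(3))
+// on := op.Const(s.SubScope("on"), float32(5))
+// off := op.Const(s.SubScope("off"), float32(0))
+// out := op.OneHot(s, indices, depth, on, off) // shape [4, 3]
+// ```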
-// ResourceSparseApplyMomentumAttr is an optional argument to ResourceSparseApplyMomentum.
-type ResourceSparseApplyMomentumAttr func(optionalAttr)
-
-// ResourceSparseApplyMomentumUseLocking sets the optional use_locking attribute to value.
-//
-// value: If `True`, updating of the var and accum tensors will be protected
-// by a lock; otherwise the behavior is undefined, but may exhibit less
-// contention.
-// If not specified, defaults to false
-func ResourceSparseApplyMomentumUseLocking(value bool) ResourceSparseApplyMomentumAttr {
- return func(m optionalAttr) {
- m["use_locking"] = value
- }
-}
+// QueueDequeueV2Attr is an optional argument to QueueDequeueV2.
+type QueueDequeueV2Attr func(optionalAttr)
-// ResourceSparseApplyMomentumUseNesterov sets the optional use_nesterov attribute to value.
+// QueueDequeueV2TimeoutMs sets the optional timeout_ms attribute to value.
//
-// value: If `True`, the tensor passed to compute grad will be
-// var - lr * momentum * accum, so in the end, the var you get is actually
-// var - lr * momentum * accum.
-// If not specified, defaults to false
-func ResourceSparseApplyMomentumUseNesterov(value bool) ResourceSparseApplyMomentumAttr {
+// value: If the queue is empty, this operation will block for up to
+// timeout_ms milliseconds.
+// Note: This option is not supported yet.
+// If not specified, defaults to -1
+func QueueDequeueV2TimeoutMs(value int64) QueueDequeueV2Attr {
return func(m optionalAttr) {
- m["use_nesterov"] = value
+ m["timeout_ms"] = value
}
}
-// Update relevant entries in '*var' and '*accum' according to the momentum scheme.
-//
-// Set use_nesterov = True if you want to use Nesterov momentum.
+// Dequeues a tuple of one or more tensors from the given queue.
//
-// That is for rows we have grad for, we update var and accum as follows:
+// This operation has k outputs, where k is the number of components
+// in the tuples stored in the given queue, and output i is the ith
+// component of the dequeued tuple.
//
-// accum = accum * momentum + grad
-// var -= lr * accum
+// N.B. If the queue is empty, this operation will block until an element
+// has been dequeued (or 'timeout_ms' elapses, if specified).
//
// Arguments:
-// var_: Should be from a Variable().
-// accum: Should be from a Variable().
-// lr: Learning rate. Must be a scalar.
-// grad: The gradient.
-// indices: A vector of indices into the first dimension of var and accum.
-// momentum: Momentum. Must be a scalar.
+// handle: The handle to a queue.
+// component_types: The type of each component in a tuple.
//
-// Returns the created operation.
-func ResourceSparseApplyMomentum(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, indices tf.Output, momentum tf.Output, optional ...ResourceSparseApplyMomentumAttr) (o *tf.Operation) {
+// Returns One or more tensors that were dequeued as a tuple.
+func QueueDequeueV2(scope *Scope, handle tf.Output, component_types []tf.DataType, optional ...QueueDequeueV2Attr) (components []tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
+ attrs := map[string]interface{}{"component_types": component_types}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "ResourceSparseApplyMomentum",
+ Type: "QueueDequeueV2",
Input: []tf.Input{
- var_, accum, lr, grad, indices, momentum,
+ handle,
},
Attrs: attrs,
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ if scope.Err() != nil {
+ return
+ }
+ var idx int
+ var err error
+ if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
+ scope.UpdateErr("QueueDequeueV2", err)
+ return
+ }
+ return components
}
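+
+// A hedged usage sketch (scope `s` assumed; FIFOQueueV2 and QueueEnqueueV2
+// are other wrappers in this package):
+//
+// ```
+// q := op.FIFOQueueV2(s, []tf.DataType{tf.Float})
+// vals := op.QueueDequeueV2(s, q, []tf.DataType{tf.Float}) // blocks until an element is enqueued
+// ```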
-// Returns the complex conjugate of a complex number.
-//
-// Given a tensor `input` of complex numbers, this operation returns a tensor of
-// complex numbers that are the complex conjugate of each element in `input`. The
-// complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the
-// real part and *b* is the imaginary part.
+// Returns locations of nonzero / true values in a tensor.
//
-// The complex conjugate returned by this operation is of the form \\(a - bj\\).
+// This operation returns the coordinates of true elements in `condition`. The
+// coordinates are returned in a 2-D tensor where the first dimension (rows)
+// represents the number of true elements, and the second dimension (columns)
+// represents the coordinates of the true elements. Keep in mind, the shape of
+// the output tensor can vary depending on how many true values there are in
+// `condition`. Indices are output in row-major order.
//
// For example:
//
// ```
-// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
-// tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
-// ```
-func Conj(scope *Scope, input tf.Output) (output tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "Conj",
- Input: []tf.Input{
- input,
- },
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// A placeholder op that passes through `input` when its output is not fed.
+// # 'input' tensor is [[True, False]
+// # [True, False]]
+// # 'input' has two true values, so output has two coordinates.
+// # 'input' has rank of 2, so coordinates have two indices.
+// where(input) ==> [[0, 0],
+// [1, 0]]
//
-// Arguments:
-// input: The default value to produce when `output` is not fed.
-// shape: The (possibly partial) shape of the tensor.
+// # `condition` tensor is [[[True, False]
+// # [True, False]]
+// # [[False, True]
+// # [False, True]]
+// # [[False, False]
+// # [False, True]]]
+// # 'input' has 5 true values, so output has 5 coordinates.
+// # 'input' has rank of 3, so coordinates have three indices.
+// where(input) ==> [[0, 0, 0],
+// [0, 1, 0],
+// [1, 0, 1],
+// [1, 1, 1],
+// [2, 1, 1]]
//
-// Returns A placeholder tensor that defaults to `input` if it is not fed.
-func PlaceholderWithDefault(scope *Scope, input tf.Output, shape tf.Shape) (output tf.Output) {
+// # `condition` tensor is [[[1.5, 0.0]
+// # [-0.5, 0.0]]
+// # [[0.0, 0.25]
+// # [0.0, 0.75]]
+// # [[0.0, 0.0]
+// # [0.0, 0.01]]]
+// # 'input' has 5 nonzero values, so output has 5 coordinates.
+// # 'input' has rank of 3, so coordinates have three indices.
+// where(input) ==> [[0, 0, 0],
+// [0, 1, 0],
+// [1, 0, 1],
+// [1, 1, 1],
+// [2, 1, 1]]
+//
+// # `condition` tensor is [[[1.5 + 0.0j, 0.0 + 0.0j]
+// # [0.0 + 0.5j, 0.0 + 0.0j]]
+// # [[0.0 + 0.0j, 0.25 + 1.5j]
+// # [0.0 + 0.0j, 0.75 + 0.0j]]
+// # [[0.0 + 0.0j, 0.0 + 0.0j]
+// # [0.0 + 0.0j, 0.01 + 0.0j]]]
+// # 'input' has 5 nonzero magnitude values, so output has 5 coordinates.
+// # 'input' has rank of 3, so coordinates have three indices.
+// where(input) ==> [[0, 0, 0],
+// [0, 1, 0],
+// [1, 0, 1],
+// [1, 1, 1],
+// [2, 1, 1]]
+// ```
+func Where(scope *Scope, condition tf.Output) (index tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"shape": shape}
opspec := tf.OpSpec{
- Type: "PlaceholderWithDefault",
+ Type: "Where",
Input: []tf.Input{
- input,
+ condition,
},
- Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
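+
+// A hedged usage sketch matching the first example above (scope `s` assumed):
+//
+// ```
+// cond := op.Const(s, [][]bool{{true, false}, {true, false}})
+// idx := op.Where(s, cond) // int64 tensor [[0, 0], [1, 0]]
+// ```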
-// Deprecated. Use TensorArrayReadV3
-func TensorArrayReadV2(scope *Scope, handle tf.Output, index tf.Output, flow_in tf.Output, dtype tf.DataType) (value tf.Output) {
- if scope.Err() != nil {
- return
- }
- attrs := map[string]interface{}{"dtype": dtype}
- opspec := tf.OpSpec{
- Type: "TensorArrayReadV2",
- Input: []tf.Input{
- handle, index, flow_in,
- },
- Attrs: attrs,
+// QuantizeAndDequantizeAttr is an optional argument to QuantizeAndDequantize.
+type QuantizeAndDequantizeAttr func(optionalAttr)
+
+// QuantizeAndDequantizeSignedInput sets the optional signed_input attribute to value.
+// If not specified, defaults to true
+func QuantizeAndDequantizeSignedInput(value bool) QuantizeAndDequantizeAttr {
+ return func(m optionalAttr) {
+ m["signed_input"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// QuantizedMatMulAttr is an optional argument to QuantizedMatMul.
-type QuantizedMatMulAttr func(optionalAttr)
-
-// QuantizedMatMulToutput sets the optional Toutput attribute to value.
-// If not specified, defaults to DT_QINT32
-func QuantizedMatMulToutput(value tf.DataType) QuantizedMatMulAttr {
+// QuantizeAndDequantizeNumBits sets the optional num_bits attribute to value.
+// If not specified, defaults to 8
+func QuantizeAndDequantizeNumBits(value int64) QuantizeAndDequantizeAttr {
return func(m optionalAttr) {
- m["Toutput"] = value
+ m["num_bits"] = value
}
}
-// QuantizedMatMulTransposeA sets the optional transpose_a attribute to value.
-//
-// value: If true, `a` is transposed before multiplication.
+// QuantizeAndDequantizeRangeGiven sets the optional range_given attribute to value.
// If not specified, defaults to false
-func QuantizedMatMulTransposeA(value bool) QuantizedMatMulAttr {
+func QuantizeAndDequantizeRangeGiven(value bool) QuantizeAndDequantizeAttr {
return func(m optionalAttr) {
- m["transpose_a"] = value
+ m["range_given"] = value
}
}
-// QuantizedMatMulTransposeB sets the optional transpose_b attribute to value.
-//
-// value: If true, `b` is transposed before multiplication.
-// If not specified, defaults to false
-func QuantizedMatMulTransposeB(value bool) QuantizedMatMulAttr {
+// QuantizeAndDequantizeInputMin sets the optional input_min attribute to value.
+// If not specified, defaults to 0
+func QuantizeAndDequantizeInputMin(value float32) QuantizeAndDequantizeAttr {
return func(m optionalAttr) {
- m["transpose_b"] = value
+ m["input_min"] = value
}
}
-// QuantizedMatMulTactivation sets the optional Tactivation attribute to value.
-//
-// value: The type of output produced by activation function
-// following this operation.
-// If not specified, defaults to DT_QUINT8
-func QuantizedMatMulTactivation(value tf.DataType) QuantizedMatMulAttr {
+// QuantizeAndDequantizeInputMax sets the optional input_max attribute to value.
+// If not specified, defaults to 0
+func QuantizeAndDequantizeInputMax(value float32) QuantizeAndDequantizeAttr {
return func(m optionalAttr) {
- m["Tactivation"] = value
+ m["input_max"] = value
}
}
-// Perform a quantized matrix multiplication of `a` by the matrix `b`.
-//
-// The inputs must be two-dimensional matrices and the inner dimension of
-// `a` (after being transposed if `transpose_a` is non-zero) must match the
-// outer dimension of `b` (after being transposed if `transposed_b` is
-// non-zero).
-//
-// Arguments:
-// a: Must be a two-dimensional tensor.
-// b: Must be a two-dimensional tensor.
-// min_a: The float value that the lowest quantized `a` value represents.
-// max_a: The float value that the highest quantized `a` value represents.
-// min_b: The float value that the lowest quantized `b` value represents.
-// max_b: The float value that the highest quantized `b` value represents.
+// Use QuantizeAndDequantizeV2 instead.
//
-// Returns The float value that the lowest quantized output value represents.The float value that the highest quantized output value represents.
-func QuantizedMatMul(scope *Scope, a tf.Output, b tf.Output, min_a tf.Output, max_a tf.Output, min_b tf.Output, max_b tf.Output, optional ...QuantizedMatMulAttr) (out tf.Output, min_out tf.Output, max_out tf.Output) {
+// DEPRECATED at GraphDef version 22: Replaced by QuantizeAndDequantizeV2
+func QuantizeAndDequantize(scope *Scope, input tf.Output, optional ...QuantizeAndDequantizeAttr) (output tf.Output) {
if scope.Err() != nil {
return
}
@@ -27702,75 +27830,47 @@ func QuantizedMatMul(scope *Scope, a tf.Output, b tf.Output, min_a tf.Output, ma
a(attrs)
}
opspec := tf.OpSpec{
- Type: "QuantizedMatMul",
+ Type: "QuantizeAndDequantize",
Input: []tf.Input{
- a, b, min_a, max_a, min_b, max_b,
+ input,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
-}
-
-// QuantizedMulAttr is an optional argument to QuantizedMul.
-type QuantizedMulAttr func(optionalAttr)
-
-// QuantizedMulToutput sets the optional Toutput attribute to value.
-// If not specified, defaults to DT_QINT32
-func QuantizedMulToutput(value tf.DataType) QuantizedMulAttr {
- return func(m optionalAttr) {
- m["Toutput"] = value
- }
+ return op.Output(0)
}
-// Returns x * y element-wise, working on quantized buffers.
+// Returns the diagonal part of the tensor.
//
-// Arguments:
+// This operation returns a tensor with the `diagonal` part
+// of the `input`. The `diagonal` part is computed as follows:
//
+// Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a
+// tensor of rank `k` with dimensions `[D1,..., Dk]` where:
//
-// min_x: The float value that the lowest quantized `x` value represents.
-// max_x: The float value that the highest quantized `x` value represents.
-// min_y: The float value that the lowest quantized `y` value represents.
-// max_y: The float value that the highest quantized `y` value represents.
+// `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.
//
-// Returns The float value that the lowest quantized output value represents.The float value that the highest quantized output value represents.
+// For example:
//
-// *NOTE*: `QuantizedMul` supports limited forms of broadcasting. More about
-// broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-func QuantizedMul(scope *Scope, x tf.Output, y tf.Output, min_x tf.Output, max_x tf.Output, min_y tf.Output, max_y tf.Output, optional ...QuantizedMulAttr) (z tf.Output, min_z tf.Output, max_z tf.Output) {
- if scope.Err() != nil {
- return
- }
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
- opspec := tf.OpSpec{
- Type: "QuantizedMul",
- Input: []tf.Input{
- x, y, min_x, max_x, min_y, max_y,
- },
- Attrs: attrs,
- }
- op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
-}
-
-// Forwards the input to the output.
+// ```
+// # 'input' is [[1, 0, 0, 0]
+// [0, 2, 0, 0]
+// [0, 0, 3, 0]
+// [0, 0, 0, 4]]
//
-// This operator represents the loop termination condition used by the
-// "pivot" switches of a loop.
+// tf.diag_part(input) ==> [1, 2, 3, 4]
+// ```
//
// Arguments:
-// input: A boolean scalar, representing the branch predicate of the Switch op.
+// input: Rank k tensor where k is even and not zero.
//
-// Returns The same tensor as `input`.
-func LoopCond(scope *Scope, input tf.Output) (output tf.Output) {
+// Returns The extracted diagonal.
+func DiagPart(scope *Scope, input tf.Output) (diagonal tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
- Type: "LoopCond",
+ Type: "DiagPart",
Input: []tf.Input{
input,
},
@@ -27779,561 +27879,461 @@ func LoopCond(scope *Scope, input tf.Output) (output tf.Output) {
return op.Output(0)
}
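+
+// A hedged usage sketch (scope `s` assumed):
+//
+// ```
+// m := op.Const(s, [][]int32{{1, 0}, {0, 2}}) // rank 2, so k = 1
+// d := op.DiagPart(s, m)                      // [1, 2]
+// ```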
-// Returns (x - y)(x - y) element-wise.
+// QuantizedInstanceNormAttr is an optional argument to QuantizedInstanceNorm.
+type QuantizedInstanceNormAttr func(optionalAttr)
+
+// QuantizedInstanceNormOutputRangeGiven sets the optional output_range_given attribute to value.
//
-// *NOTE*: `SquaredDifference` supports broadcasting. More about broadcasting
-// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-func SquaredDifference(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "SquaredDifference",
- Input: []tf.Input{
- x, y,
- },
+// value: If True, `given_y_min` and `given_y_max`
+// are used as the output range. Otherwise,
+// the implementation computes the output range.
+// If not specified, defaults to false
+func QuantizedInstanceNormOutputRangeGiven(value bool) QuantizedInstanceNormAttr {
+ return func(m optionalAttr) {
+ m["output_range_given"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// Convert the quantized 'input' tensor into a lower-precision 'output', using the
-//
-// actual distribution of the values to maximize the usage of the lower bit depth
-// and adjusting the output min and max ranges accordingly.
-//
-// [input_min, input_max] are scalar floats that specify the range for the float
-// interpretation of the 'input' data. For example, if input_min is -1.0f and
-// input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0
-// value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.
-//
-// This operator tries to squeeze as much precision as possible into an output with
-// a lower bit depth by calculating the actual min and max values found in the
-// data. For example, maybe that quint16 input has no values lower than 16,384 and
-// none higher than 49,152. That means only half the range is actually needed, all
-// the float interpretations are between -0.5f and 0.5f, so if we want to compress
-// the data into a quint8 output, we can use that range rather than the theoretical
-// -1.0f to 1.0f that is suggested by the input min and max.
-//
-// In practice, this is most useful for taking output from operations like
-// QuantizedMatMul that can produce higher bit-depth outputs than their inputs and
-// may have large potential output ranges, but in practice have a distribution of
-// input values that only uses a small fraction of the possible range. By feeding
-// that output into this operator, we can reduce it from 32 bits down to 8 with
-// minimal loss of accuracy.
-//
-// Arguments:
-//
-// input_min: The float value that the minimum quantized input value represents.
-// input_max: The float value that the maximum quantized input value represents.
-// out_type: The type of the output. Should be a lower bit depth than Tinput.
+// QuantizedInstanceNormGivenYMin sets the optional given_y_min attribute to value.
//
-// Returns The float value that the minimum quantized output value represents.The float value that the maximum quantized output value represents.
-func QuantizeDownAndShrinkRange(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, out_type tf.DataType) (output tf.Output, output_min tf.Output, output_max tf.Output) {
- if scope.Err() != nil {
- return
- }
- attrs := map[string]interface{}{"out_type": out_type}
- opspec := tf.OpSpec{
- Type: "QuantizeDownAndShrinkRange",
- Input: []tf.Input{
- input, input_min, input_max,
- },
- Attrs: attrs,
+// value: Output in `y_min` if `output_range_given` is True.
+// If not specified, defaults to 0
+func QuantizedInstanceNormGivenYMin(value float32) QuantizedInstanceNormAttr {
+ return func(m optionalAttr) {
+ m["given_y_min"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0), op.Output(1), op.Output(2)
}
-// Compare values of `input` to `threshold` and pack resulting bits into a `uint8`.
-//
-// Each comparison returns a boolean `true` (if `input_value > threshold`)
-// or `false` otherwise.
-//
-// This operation is useful for Locality-Sensitive-Hashing (LSH) and other
-// algorithms that use hashing approximations of cosine and `L2` distances;
-// codes can be generated from an input via:
-//
-// ```python
-// codebook_size = 50
-// codebook_bits = codebook_size * 32
-// codebook = tf.get_variable('codebook', [x.shape[-1].value, codebook_bits],
-// dtype=x.dtype,
-// initializer=tf.orthogonal_initializer())
-// codes = compare_and_threshold(tf.matmul(x, codebook), threshold=0.)
-// codes = tf.bitcast(codes, tf.int32) # go from uint8 to int32
-// # now codes has shape x.shape[:-1] + [codebook_size]
-// ```
-//
-// **NOTE**: Currently, the innermost dimension of the tensor must be divisible
-// by 8.
-//
-// Given an `input` shaped `[s0, s1, ..., s_n]`, the output is
-// a `uint8` tensor shaped `[s0, s1, ..., s_n / 8]`.
-//
-// Arguments:
-// input: Values to compare against `threshold` and bitpack.
-// threshold: Threshold to compare against.
+// QuantizedInstanceNormGivenYMax sets the optional given_y_max attribute to value.
//
-// Returns The bitpacked comparisons.
-func CompareAndBitpack(scope *Scope, input tf.Output, threshold tf.Output) (output tf.Output) {
- if scope.Err() != nil {
- return
+// value: Output in `y_max` if `output_range_given` is True.
+// If not specified, defaults to 0
+func QuantizedInstanceNormGivenYMax(value float32) QuantizedInstanceNormAttr {
+ return func(m optionalAttr) {
+ m["given_y_max"] = value
}
- opspec := tf.OpSpec{
- Type: "CompareAndBitpack",
- Input: []tf.Input{
- input, threshold,
- },
+}
+
+// QuantizedInstanceNormVarianceEpsilon sets the optional variance_epsilon attribute to value.
+//
+// value: A small float number to avoid dividing by 0.
+// If not specified, defaults to 1e-05
+func QuantizedInstanceNormVarianceEpsilon(value float32) QuantizedInstanceNormAttr {
+ return func(m optionalAttr) {
+ m["variance_epsilon"] = value
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
}
-// Replaces the contents of the table with the specified keys and values.
+// QuantizedInstanceNormMinSeparation sets the optional min_separation attribute to value.
//
-// The tensor `keys` must be of the same type as the keys of the table.
-// The tensor `values` must be of the type of the table values.
+// value: Minimum value of `y_max - y_min`
+// If not specified, defaults to 0.001
+func QuantizedInstanceNormMinSeparation(value float32) QuantizedInstanceNormAttr {
+ return func(m optionalAttr) {
+ m["min_separation"] = value
+ }
+}
+
+// Quantized Instance normalization.
//
// Arguments:
-// table_handle: Handle to the table.
-// keys: Any shape. Keys to look up.
-// values: Values to associate with keys.
+// x: A 4D input Tensor.
+// x_min: The value represented by the lowest quantized input.
+// x_max: The value represented by the highest quantized input.
//
-// Returns the created operation.
-func LookupTableImportV2(scope *Scope, table_handle tf.Output, keys tf.Output, values tf.Output) (o *tf.Operation) {
+// Returns A 4D Tensor. The value represented by the lowest quantized output. The value represented by the highest quantized output.
+func QuantizedInstanceNorm(scope *Scope, x tf.Output, x_min tf.Output, x_max tf.Output, optional ...QuantizedInstanceNormAttr) (y tf.Output, y_min tf.Output, y_max tf.Output) {
if scope.Err() != nil {
return
}
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
opspec := tf.OpSpec{
- Type: "LookupTableImportV2",
+ Type: "QuantizedInstanceNorm",
Input: []tf.Input{
- table_handle, keys, values,
+ x, x_min, x_max,
},
+ Attrs: attrs,
}
- return scope.AddOperation(opspec)
+ op := scope.AddOperation(opspec)
+ return op.Output(0), op.Output(1), op.Output(2)
}
-// HashTableV2Attr is an optional argument to HashTableV2.
-type HashTableV2Attr func(optionalAttr)
-
-// HashTableV2Container sets the optional container attribute to value.
-//
-// value: If non-empty, this table is placed in the given container.
-// Otherwise, a default container is used.
-// If not specified, defaults to ""
-func HashTableV2Container(value string) HashTableV2Attr {
- return func(m optionalAttr) {
- m["container"] = value
- }
-}
+// FakeQuantWithMinMaxVarsAttr is an optional argument to FakeQuantWithMinMaxVars.
+type FakeQuantWithMinMaxVarsAttr func(optionalAttr)
-// HashTableV2SharedName sets the optional shared_name attribute to value.
-//
-// value: If non-empty, this table is shared under the given name across
-// multiple sessions.
-// If not specified, defaults to ""
-func HashTableV2SharedName(value string) HashTableV2Attr {
+// FakeQuantWithMinMaxVarsNumBits sets the optional num_bits attribute to value.
+// If not specified, defaults to 8
+func FakeQuantWithMinMaxVarsNumBits(value int64) FakeQuantWithMinMaxVarsAttr {
return func(m optionalAttr) {
- m["shared_name"] = value
+ m["num_bits"] = value
}
}
-// HashTableV2UseNodeNameSharing sets the optional use_node_name_sharing attribute to value.
-//
-// value: If true and shared_name is empty, the table is shared
-// using the node name.
+// FakeQuantWithMinMaxVarsNarrowRange sets the optional narrow_range attribute to value.
// If not specified, defaults to false
-func HashTableV2UseNodeNameSharing(value bool) HashTableV2Attr {
+func FakeQuantWithMinMaxVarsNarrowRange(value bool) FakeQuantWithMinMaxVarsAttr {
return func(m optionalAttr) {
- m["use_node_name_sharing"] = value
+ m["narrow_range"] = value
}
}
-// Creates a non-initialized hash table.
+// Fake-quantize the 'inputs' tensor of type float via global float scalars `min`
//
-// This op creates a hash table, specifying the type of its keys and values.
-// Before using the table you will have to initialize it. After initialization the
-// table will be immutable.
+// and `max` to 'outputs' tensor of same shape as `inputs`.
//
-// Arguments:
-// key_dtype: Type of the table keys.
-// value_dtype: Type of the table values.
+// `[min; max]` define the clamping range for the `inputs` data.
+// `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`
+// when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and
+// then de-quantized and output as floats in `[min; max]` interval.
+// `num_bits` is the bitwidth of the quantization; between 2 and 8, inclusive.
//
-// Returns Handle to a table.
-func HashTableV2(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType, optional ...HashTableV2Attr) (table_handle tf.Output) {
+// This operation has a gradient and thus allows for training `min` and `max`
+// values.
+func FakeQuantWithMinMaxVars(scope *Scope, inputs tf.Output, min tf.Output, max tf.Output, optional ...FakeQuantWithMinMaxVarsAttr) (outputs tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"key_dtype": key_dtype, "value_dtype": value_dtype}
+ attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "HashTableV2",
-
+ Type: "FakeQuantWithMinMaxVars",
+ Input: []tf.Input{
+ inputs, min, max,
+ },
Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
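+
+// A hedged usage sketch (scope `s` assumed; in training, min/max would
+// normally be trainable variables rather than constants):
+//
+// ```
+// x := op.Const(s, []float32{-0.5, 0.0, 3.1, 7.2})
+// qmin := op.Const(s.SubScope("min"), float32(0))
+// qmax := op.Const(s.SubScope("max"), float32(6))
+// y := op.FakeQuantWithMinMaxVars(s, x, qmin, qmax,
+//     op.FakeQuantWithMinMaxVarsNumBits(8)) // clamps to [0, 6], then quantizes and dequantizes
+// ```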
-// MutableHashTableV2Attr is an optional argument to MutableHashTableV2.
-type MutableHashTableV2Attr func(optionalAttr)
-
-// MutableHashTableV2Container sets the optional container attribute to value.
-//
-// value: If non-empty, this table is placed in the given container.
-// Otherwise, a default container is used.
-// If not specified, defaults to ""
-func MutableHashTableV2Container(value string) MutableHashTableV2Attr {
- return func(m optionalAttr) {
- m["container"] = value
- }
-}
+// FakeQuantWithMinMaxVarsPerChannelGradientAttr is an optional argument to FakeQuantWithMinMaxVarsPerChannelGradient.
+type FakeQuantWithMinMaxVarsPerChannelGradientAttr func(optionalAttr)
-// MutableHashTableV2SharedName sets the optional shared_name attribute to value.
+// FakeQuantWithMinMaxVarsPerChannelGradientNumBits sets the optional num_bits attribute to value.
//
-// value: If non-empty, this table is shared under the given name across
-// multiple sessions.
-// If not specified, defaults to ""
-func MutableHashTableV2SharedName(value string) MutableHashTableV2Attr {
+// value: The bitwidth of the quantization; between 2 and 8, inclusive.
+// If not specified, defaults to 8
+func FakeQuantWithMinMaxVarsPerChannelGradientNumBits(value int64) FakeQuantWithMinMaxVarsPerChannelGradientAttr {
return func(m optionalAttr) {
- m["shared_name"] = value
+ m["num_bits"] = value
}
}
-// MutableHashTableV2UseNodeNameSharing sets the optional use_node_name_sharing attribute to value.
+// FakeQuantWithMinMaxVarsPerChannelGradientNarrowRange sets the optional narrow_range attribute to value.
//
-// value: If true and shared_name is empty, the table is shared
-// using the node name.
+// value: Whether to quantize into 2^num_bits - 1 distinct values.
// If not specified, defaults to false
-func MutableHashTableV2UseNodeNameSharing(value bool) MutableHashTableV2Attr {
+func FakeQuantWithMinMaxVarsPerChannelGradientNarrowRange(value bool) FakeQuantWithMinMaxVarsPerChannelGradientAttr {
return func(m optionalAttr) {
- m["use_node_name_sharing"] = value
+ m["narrow_range"] = value
}
}
-// Creates an empty hash table.
-//
-// This op creates a mutable hash table, specifying the type of its keys and
-// values. Each value must be a scalar. Data can be inserted into the table using
-// the insert operations. It does not support the initialization operation.
+// Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation.
//
// Arguments:
-// key_dtype: Type of the table keys.
-// value_dtype: Type of the table values.
+// gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation,
+// shape one of: `[d]`, `[b, d]`, `[b, h, w, d]`.
+// inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape
+// same as `gradients`.
+// min, max: Quantization interval, floats of shape `[d]`.
//
-// Returns Handle to a table.
-func MutableHashTableV2(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType, optional ...MutableHashTableV2Attr) (table_handle tf.Output) {
+//
+//
+// Returns Backpropagated gradients w.r.t. inputs, shape same as
+// `inputs`:
+// `gradients * (inputs >= min && inputs <= max)`. Backpropagated gradients w.r.t. min parameter, shape `[d]`:
+// `sum_per_d(gradients * (inputs < min))`. Backpropagated gradients w.r.t. max parameter, shape `[d]`:
+// `sum_per_d(gradients * (inputs > max))`.
+func FakeQuantWithMinMaxVarsPerChannelGradient(scope *Scope, gradients tf.Output, inputs tf.Output, min tf.Output, max tf.Output, optional ...FakeQuantWithMinMaxVarsPerChannelGradientAttr) (backprops_wrt_input tf.Output, backprop_wrt_min tf.Output, backprop_wrt_max tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"key_dtype": key_dtype, "value_dtype": value_dtype}
+ attrs := map[string]interface{}{}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "MutableHashTableV2",
-
+ Type: "FakeQuantWithMinMaxVarsPerChannelGradient",
+ Input: []tf.Input{
+ gradients, inputs, min, max,
+ },
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
-// MapUnstageNoKeyAttr is an optional argument to MapUnstageNoKey.
-type MapUnstageNoKeyAttr func(optionalAttr)
-
-// MapUnstageNoKeyCapacity sets the optional capacity attribute to value.
-// If not specified, defaults to 0
-//
-// REQUIRES: value >= 0
-func MapUnstageNoKeyCapacity(value int64) MapUnstageNoKeyAttr {
- return func(m optionalAttr) {
- m["capacity"] = value
- }
+ return op.Output(0), op.Output(1), op.Output(2)
}
-// MapUnstageNoKeyMemoryLimit sets the optional memory_limit attribute to value.
-// If not specified, defaults to 0
-//
-// REQUIRES: value >= 0
-func MapUnstageNoKeyMemoryLimit(value int64) MapUnstageNoKeyAttr {
- return func(m optionalAttr) {
- m["memory_limit"] = value
- }
-}
+// QuantizeV2Attr is an optional argument to QuantizeV2.
+type QuantizeV2Attr func(optionalAttr)
-// MapUnstageNoKeyContainer sets the optional container attribute to value.
-// If not specified, defaults to ""
-func MapUnstageNoKeyContainer(value string) MapUnstageNoKeyAttr {
+// QuantizeV2Mode sets the optional mode attribute to value.
+// If not specified, defaults to "MIN_COMBINED"
+func QuantizeV2Mode(value string) QuantizeV2Attr {
return func(m optionalAttr) {
- m["container"] = value
+ m["mode"] = value
}
}
-// MapUnstageNoKeySharedName sets the optional shared_name attribute to value.
-// If not specified, defaults to ""
-func MapUnstageNoKeySharedName(value string) MapUnstageNoKeyAttr {
+// QuantizeV2RoundMode sets the optional round_mode attribute to value.
+// If not specified, defaults to "HALF_AWAY_FROM_ZERO"
+func QuantizeV2RoundMode(value string) QuantizeV2Attr {
return func(m optionalAttr) {
- m["shared_name"] = value
+ m["round_mode"] = value
}
}
-// Op removes and returns a random (key, value)
+// Quantize the 'input' tensor of type float to 'output' tensor of type 'T'.
//
-// from the underlying container. If the underlying container
-// does not contain elements, the op will block until it does.
-func MapUnstageNoKey(scope *Scope, indices tf.Output, dtypes []tf.DataType, optional ...MapUnstageNoKeyAttr) (key tf.Output, values []tf.Output) {
+// [min_range, max_range] are scalar floats that specify the range for
+// the 'input' data. The 'mode' attribute controls exactly which calculations are
+// used to convert the float values to their quantized equivalents. The
+// 'round_mode' attribute controls which rounding tie-breaking algorithm is used
+// when rounding float values to their quantized equivalents.
+//
+// In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
+//
+// ```
+// out[i] = (in[i] - min_range) * range(T) / (max_range - min_range)
+// if T == qint8, out[i] -= (range(T) + 1) / 2.0
+// ```
+// where `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`.
+//
+// *MIN_COMBINED Mode Example*
+//
+// Assume the input is type float and has a possible range of [0.0, 6.0] and the
+// output type is quint8 ([0, 255]). The min_range and max_range values should be
+// specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each
+// value of the input by 255/6 and cast to quint8.
+//
+// If the output type was qint8 ([-128, 127]), the operation will additionally
+// subtract each value by 128 prior to casting, so that the range of values aligns
+// with the range of qint8.
+//
+// If the mode is 'MIN_FIRST', then this approach is used:
+//
+// ```
+// num_discrete_values = 1 << (# of bits in T)
+// range_adjust = num_discrete_values / (num_discrete_values - 1)
+// range = (range_max - range_min) * range_adjust
+// range_scale = num_discrete_values / range
+// quantized = round(input * range_scale) - round(range_min * range_scale) +
+// numeric_limits<T>::min()
+// quantized = max(quantized, numeric_limits<T>::min())
+// quantized = min(quantized, numeric_limits<T>::max())
+// ```
+//
+// The biggest difference between this and MIN_COMBINED is that the minimum range
+// is rounded first, before it's subtracted from the rounded value. With
+// MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing
+// and dequantizing will introduce a larger and larger error.
+//
+// *SCALED mode Example*
+//
+// `SCALED` mode matches the quantization approach used in
+// `QuantizeAndDequantize{V2|V3}`.
+//
+// If the mode is `SCALED`, we do not use the full range of the output type,
+// choosing to elide the lowest possible value for symmetry (e.g., output range is
+// -127 to 127, not -128 to 127 for signed 8 bit quantization), so that 0.0 maps to
+// 0.
+//
+// We first find the range of values in our tensor. The
+// range we use is always centered on 0, so we find m such that
+// ```c++
+// m = max(abs(input_min), abs(input_max))
+// ```
+//
+// Our input tensor range is then `[-m, m]`.
+//
+// Next, we choose our fixed-point quantization buckets, `[min_fixed, max_fixed]`.
+// If T is signed, this is
+// ```
+// num_bits = sizeof(T) * 8
+// [min_fixed, max_fixed] =
+// [-((1 << (num_bits - 1)) - 1), (1 << (num_bits - 1)) - 1]
+// ```
+//
+// Otherwise, if T is unsigned, the fixed-point range is
+// ```
+// [min_fixed, max_fixed] = [0, (1 << num_bits) - 1]
+// ```
+//
+// From this we compute our scaling factor, s:
+// ```c++
+// s = (max_fixed - min_fixed) / (2 * m)
+// ```
+//
+// Now we can quantize the elements of our tensor:
+// ```c++
+// result = round(input * s)
+// ```
+//
+// One thing to watch out for is that the operator may choose to adjust the
+// requested minimum and maximum values slightly during the quantization process,
+// so you should always use the output ports as the range for further calculations.
+// For example, if the requested minimum and maximum values are close to equal,
+// they will be separated by a small epsilon value to prevent ill-formed quantized
+// buffers from being created. Otherwise, you can end up with buffers where all the
+// quantized values map to the same float value, which causes problems for
+// operations that have to perform further calculations on them.
+//
+// Arguments:
+//
+// min_range: The minimum scalar value possibly produced for the input.
+// max_range: The maximum scalar value possibly produced for the input.
+//
+//
+// Returns The quantized data produced from the float input. The actual minimum scalar value used for the output. The actual maximum scalar value used for the output.
+func QuantizeV2(scope *Scope, input tf.Output, min_range tf.Output, max_range tf.Output, T tf.DataType, optional ...QuantizeV2Attr) (output tf.Output, output_min tf.Output, output_max tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"dtypes": dtypes}
+ attrs := map[string]interface{}{"T": T}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "MapUnstageNoKey",
+ Type: "QuantizeV2",
Input: []tf.Input{
- indices,
+ input, min_range, max_range,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
- if scope.Err() != nil {
- return
- }
- var idx int
- var err error
- key = op.Output(idx)
- if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
- scope.UpdateErr("MapUnstageNoKey", err)
- return
- }
- return key, values
-}
-
-// ResourceApplyProximalAdagradAttr is an optional argument to ResourceApplyProximalAdagrad.
-type ResourceApplyProximalAdagradAttr func(optionalAttr)
-
-// ResourceApplyProximalAdagradUseLocking sets the optional use_locking attribute to value.
-//
-// value: If True, updating of the var and accum tensors will be protected by
-// a lock; otherwise the behavior is undefined, but may exhibit less contention.
-// If not specified, defaults to false
-func ResourceApplyProximalAdagradUseLocking(value bool) ResourceApplyProximalAdagradAttr {
- return func(m optionalAttr) {
- m["use_locking"] = value
- }
+ return op.Output(0), op.Output(1), op.Output(2)
}
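+
+// A minimal usage sketch for QuantizeV2 from client code, assuming a live
+// graph Scope and the generated Const wrapper from this package (the literal
+// range values here are purely illustrative):
+//
+// ```go
+// s := op.NewScope()
+// input := op.Const(s.SubScope("in"), []float32{-1.5, 0.0, 2.5})
+// minR := op.Const(s.SubScope("min"), float32(-2.5))
+// maxR := op.Const(s.SubScope("max"), float32(2.5))
+// // Use outMin/outMax, not minR/maxR, as the range for later calculations,
+// // since the op may nudge the requested range slightly (see above).
+// out, outMin, outMax := op.QuantizeV2(s, input, minR, maxR, tf.Qint8)
+// _, _, _ = out, outMin, outMax
+// ```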
-// Update '*var' and '*accum' according to FOBOS with Adagrad learning rate.
-//
-// accum += grad * grad
-// prox_v = var - lr * grad * (1 / sqrt(accum))
-// var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
+// Flushes the writer's unwritten events.
//
// Arguments:
-// var_: Should be from a Variable().
-// accum: Should be from a Variable().
-// lr: Scaling factor. Must be a scalar.
-// l1: L1 regularization. Must be a scalar.
-// l2: L2 regularization. Must be a scalar.
-// grad: The gradient.
+// writer: A handle to the summary writer resource.
//
// Returns the created operation.
-func ResourceApplyProximalAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, grad tf.Output, optional ...ResourceApplyProximalAdagradAttr) (o *tf.Operation) {
+func FlushSummaryWriter(scope *Scope, writer tf.Output) (o *tf.Operation) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "ResourceApplyProximalAdagrad",
+ Type: "FlushSummaryWriter",
Input: []tf.Input{
- var_, accum, lr, l1, l2, grad,
+ writer,
},
- Attrs: attrs,
}
return scope.AddOperation(opspec)
}
-// MutableHashTableOfTensorsV2Attr is an optional argument to MutableHashTableOfTensorsV2.
-type MutableHashTableOfTensorsV2Attr func(optionalAttr)
-
-// MutableHashTableOfTensorsV2Container sets the optional container attribute to value.
-//
-// value: If non-empty, this table is placed in the given container.
-// Otherwise, a default container is used.
-// If not specified, defaults to ""
-func MutableHashTableOfTensorsV2Container(value string) MutableHashTableOfTensorsV2Attr {
- return func(m optionalAttr) {
- m["container"] = value
- }
-}
+// StackV2Attr is an optional argument to StackV2.
+type StackV2Attr func(optionalAttr)
-// MutableHashTableOfTensorsV2SharedName sets the optional shared_name attribute to value.
+// StackV2StackName sets the optional stack_name attribute to value.
//
-// value: If non-empty, this table is shared under the given name across
-// multiple sessions.
+// value: Overrides the name used for the temporary stack resource. Default
+// value is the name of the 'Stack' op (which is guaranteed unique).
// If not specified, defaults to ""
-func MutableHashTableOfTensorsV2SharedName(value string) MutableHashTableOfTensorsV2Attr {
- return func(m optionalAttr) {
- m["shared_name"] = value
- }
-}
-
-// MutableHashTableOfTensorsV2UseNodeNameSharing sets the optional use_node_name_sharing attribute to value.
-// If not specified, defaults to false
-func MutableHashTableOfTensorsV2UseNodeNameSharing(value bool) MutableHashTableOfTensorsV2Attr {
- return func(m optionalAttr) {
- m["use_node_name_sharing"] = value
- }
-}
-
-// MutableHashTableOfTensorsV2ValueShape sets the optional value_shape attribute to value.
-// If not specified, defaults to <>
-func MutableHashTableOfTensorsV2ValueShape(value tf.Shape) MutableHashTableOfTensorsV2Attr {
+func StackV2StackName(value string) StackV2Attr {
return func(m optionalAttr) {
- m["value_shape"] = value
+ m["stack_name"] = value
}
}
-// Creates an empty hash table.
-//
-// This op creates a mutable hash table, specifying the type of its keys and
-// values. Each value must be a vector. Data can be inserted into the table using
-// the insert operations. It does not support the initialization operation.
+// A stack that produces elements in first-in last-out order.
//
// Arguments:
-// key_dtype: Type of the table keys.
-// value_dtype: Type of the table values.
+// max_size: The maximum size of the stack if non-negative. If negative, the stack
+// size is unlimited.
+// elem_type: The type of the elements on the stack.
//
-// Returns Handle to a table.
-func MutableHashTableOfTensorsV2(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType, optional ...MutableHashTableOfTensorsV2Attr) (table_handle tf.Output) {
+// Returns The handle to the stack.
+func StackV2(scope *Scope, max_size tf.Output, elem_type tf.DataType, optional ...StackV2Attr) (handle tf.Output) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"key_dtype": key_dtype, "value_dtype": value_dtype}
+ attrs := map[string]interface{}{"elem_type": elem_type}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
- Type: "MutableHashTableOfTensorsV2",
-
+ Type: "StackV2",
+ Input: []tf.Input{
+ max_size,
+ },
Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
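+
+// A usage sketch for StackV2 from client code, assuming the companion
+// StackPushV2 and StackPopV2 wrappers generated elsewhere in this file:
+//
+// ```go
+// s := op.NewScope()
+// maxSize := op.Const(s.SubScope("max"), int32(10))
+// stack := op.StackV2(s, maxSize, tf.Float, op.StackV2StackName("scratch"))
+// elem := op.Const(s.SubScope("elem"), float32(1.0))
+// pushed := op.StackPushV2(s, stack, elem)
+// popped := op.StackPopV2(s, stack, tf.Float) // first-in last-out
+// _, _ = pushed, popped
+// ```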
-// Partitions `data` into `num_partitions` tensors using indices from `partitions`.
-//
-// For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]`
-// becomes part of `outputs[partitions[js]]`. The slices with `partitions[js] = i`
-// are placed in `outputs[i]` in lexicographic order of `js`, and the first
-// dimension of `outputs[i]` is the number of entries in `partitions` equal to `i`.
-// In detail,
-//
-// ```python
-// outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]
-//
-// outputs[i] = pack([data[js, ...] for js if partitions[js] == i])
-// ```
-//
-// `data.shape` must start with `partitions.shape`.
-//
-// For example:
-//
-// ```python
-// # Scalar partitions.
-// partitions = 1
-// num_partitions = 2
-// data = [10, 20]
-// outputs[0] = [] # Empty with shape [0, 2]
-// outputs[1] = [[10, 20]]
-//
-// # Vector partitions.
-// partitions = [0, 0, 1, 1, 0]
-// num_partitions = 2
-// data = [10, 20, 30, 40, 50]
-// outputs[0] = [10, 20, 50]
-// outputs[1] = [30, 40]
-// ```
-//
-// See `dynamic_stitch` for an example on how to merge partitions back.
+// Flushes and closes the summary writer.
//
-// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
-// <img style="width:100%" src="https://www.tensorflow.org/images/DynamicPartition.png" alt>
-// </div>
+// Also removes it from the resource manager. To reopen, use another
+// CreateSummaryFileWriter op.
//
// Arguments:
+// writer: A handle to the summary writer resource.
//
-// partitions: Any shape. Indices in the range `[0, num_partitions)`.
-// num_partitions: The number of partitions to output.
-func DynamicPartition(scope *Scope, data tf.Output, partitions tf.Output, num_partitions int64) (outputs []tf.Output) {
+// Returns the created operation.
+func CloseSummaryWriter(scope *Scope, writer tf.Output) (o *tf.Operation) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{"num_partitions": num_partitions}
opspec := tf.OpSpec{
- Type: "DynamicPartition",
+ Type: "CloseSummaryWriter",
Input: []tf.Input{
- data, partitions,
+ writer,
},
- Attrs: attrs,
}
- op := scope.AddOperation(opspec)
+ return scope.AddOperation(opspec)
+}
+
+// Outputs a `Summary` protocol buffer with a tensor.
+//
+// Arguments:
+// writer: A handle to a summary writer.
+// step: The step to write the summary for.
+// tensor: A tensor to serialize.
+// tag: The summary's tag.
+// summary_metadata: Serialized SummaryMetadata protocol buffer containing
+// plugin-related metadata for this summary.
+//
+// Returns the created operation.
+func WriteSummary(scope *Scope, writer tf.Output, step tf.Output, tensor tf.Output, tag tf.Output, summary_metadata tf.Output) (o *tf.Operation) {
if scope.Err() != nil {
return
}
- var idx int
- var err error
- if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
- scope.UpdateErr("DynamicPartition", err)
- return
+ opspec := tf.OpSpec{
+ Type: "WriteSummary",
+ Input: []tf.Input{
+ writer, step, tensor, tag, summary_metadata,
+ },
}
- return outputs
+ return scope.AddOperation(opspec)
}
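+
+// The summary-writer ops above compose into a simple lifecycle: create a
+// writer, write events, flush, close. A sketch from client code, assuming the
+// SummaryWriter and CreateSummaryFileWriter wrappers generated elsewhere in
+// this file (all literal values illustrative):
+//
+// ```go
+// s := op.NewScope()
+// writer := op.SummaryWriter(s)
+// op.CreateSummaryFileWriter(s, writer,
+// 	op.Const(s.SubScope("dir"), "/tmp/logs"), // logdir
+// 	op.Const(s.SubScope("q"), int32(10)),     // max_queue
+// 	op.Const(s.SubScope("ms"), int32(100)),   // flush_millis
+// 	op.Const(s.SubScope("sfx"), ".v2"))       // filename_suffix
+// // ... WriteSummary / WriteImageSummary nodes go here ...
+// op.FlushSummaryWriter(s, writer)
+// op.CloseSummaryWriter(s, writer)
+// ```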
-// SerializeSparseAttr is an optional argument to SerializeSparse.
-type SerializeSparseAttr func(optionalAttr)
-
-// SerializeSparseOutType sets the optional out_type attribute to value.
+// Outputs a `tf.Event` protocol buffer.
//
-// value: The `dtype` to use for serialization; the supported types are `string`
-// (default) and `variant`.
-// If not specified, defaults to DT_STRING
-func SerializeSparseOutType(value tf.DataType) SerializeSparseAttr {
- return func(m optionalAttr) {
- m["out_type"] = value
- }
-}
-
-// Serialize a `SparseTensor` into a `[3]` `Tensor` object.
+// When CreateSummaryDbWriter is being used, this op can be useful for
+// importing data from event logs.
//
// Arguments:
-// sparse_indices: 2-D. The `indices` of the `SparseTensor`.
-// sparse_values: 1-D. The `values` of the `SparseTensor`.
-// sparse_shape: 1-D. The `shape` of the `SparseTensor`.
-func SerializeSparse(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output, optional ...SerializeSparseAttr) (serialized_sparse tf.Output) {
+// writer: A handle to a summary writer.
+// event: A string containing a binary-encoded tf.Event proto.
+//
+// Returns the created operation.
+func ImportEvent(scope *Scope, writer tf.Output, event tf.Output) (o *tf.Operation) {
if scope.Err() != nil {
return
}
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
opspec := tf.OpSpec{
- Type: "SerializeSparse",
+ Type: "ImportEvent",
Input: []tf.Input{
- sparse_indices, sparse_values, sparse_shape,
+ writer, event,
},
- Attrs: attrs,
}
- op := scope.AddOperation(opspec)
- return op.Output(0)
+ return scope.AddOperation(opspec)
}
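+
+// A sketch of the import path described above, assuming the SummaryWriter and
+// CreateSummaryDbWriter wrappers generated elsewhere in this file, plus a
+// binary-serialized tf.Event (`serializedEvent`, assumed here) already in
+// hand; all literals are illustrative:
+//
+// ```go
+// s := op.NewScope()
+// writer := op.SummaryWriter(s)
+// op.CreateSummaryDbWriter(s, writer,
+// 	op.Const(s.SubScope("db"), "file:events.db"), // db_uri
+// 	op.Const(s.SubScope("exp"), "exp1"),          // experiment_name
+// 	op.Const(s.SubScope("run"), "run1"),          // run_name
+// 	op.Const(s.SubScope("user"), "me"))           // user_name
+// event := op.Const(s.SubScope("ev"), string(serializedEvent))
+// op.ImportEvent(s, writer, event)
+// ```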