author    A. Unique TensorFlower <gardener@tensorflow.org>    2018-04-12 11:46:26 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>    2018-04-12 11:51:37 -0700
commit    10e60219b71fc48e07b0afaa6edeec2d9afac24d (patch)
tree      9a4187e3a9dc17e8814e849872d156ed7183a3c8 /tensorflow/go
parent    024b037e9ad430c4023e3c9d250f3934f38de5cf (diff)
Go: Update generated wrapper functions for TensorFlow ops.
PiperOrigin-RevId: 192644946
Diffstat (limited to 'tensorflow/go')
-rw-r--r--  tensorflow/go/op/wrappers.go  184
1 file changed, 92 insertions(+), 92 deletions(-)
diff --git a/tensorflow/go/op/wrappers.go b/tensorflow/go/op/wrappers.go
index 2d3e369328..1d5ebf6687 100644
--- a/tensorflow/go/op/wrappers.go
+++ b/tensorflow/go/op/wrappers.go
@@ -4932,6 +4932,70 @@ func IsNan(scope *Scope, x tf.Output) (y tf.Output) {
return op.Output(0)
}
+// Identity op for gradient debugging.
+//
+// This op is hidden from public in Python. It is used by TensorFlow Debugger to
+// register gradient tensors for gradient debugging.
+// This op operates on non-reference-type tensors.
+func DebugGradientIdentity(scope *Scope, input tf.Output) (output tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "DebugGradientIdentity",
+ Input: []tf.Input{
+ input,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
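[Editorial note, not part of this generated change: a minimal sketch of calling the new wrapper through the op package. The Placeholder input and graph finalization are assumptions for illustration.]

package main

import (
	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// The op is a pass-through: the output equals the input, but the
	// tensor is registered with the TensorFlow Debugger for gradient
	// debugging.
	x := op.Placeholder(s, tf.Float)
	y := op.DebugGradientIdentity(s, x)
	if _, err := s.Finalize(); err != nil {
		panic(err)
	}
	_ = y
}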
+// ResourceSparseApplyAdadeltaAttr is an optional argument to ResourceSparseApplyAdadelta.
+type ResourceSparseApplyAdadeltaAttr func(optionalAttr)
+
+// ResourceSparseApplyAdadeltaUseLocking sets the optional use_locking attribute to value.
+//
+// value: If True, updating of the var and accum tensors will be protected by
+// a lock; otherwise the behavior is undefined, but may exhibit less contention.
+// If not specified, defaults to false
+func ResourceSparseApplyAdadeltaUseLocking(value bool) ResourceSparseApplyAdadeltaAttr {
+ return func(m optionalAttr) {
+ m["use_locking"] = value
+ }
+}
+
+// var: Should be from a Variable().
+//
+// Arguments:
+//
+// accum: Should be from a Variable().
+// accum_update: Should be from a Variable().
+// lr: Learning rate. Must be a scalar.
+// rho: Decay factor. Must be a scalar.
+// epsilon: Constant factor. Must be a scalar.
+// grad: The gradient.
+// indices: A vector of indices into the first dimension of var and accum.
+//
+// Returns the created operation.
+func ResourceSparseApplyAdadelta(scope *Scope, var_ tf.Output, accum tf.Output, accum_update tf.Output, lr tf.Output, rho tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyAdadeltaAttr) (o *tf.Operation) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
+ opspec := tf.OpSpec{
+ Type: "ResourceSparseApplyAdadelta",
+ Input: []tf.Input{
+ var_, accum, accum_update, lr, rho, epsilon, grad, indices,
+ },
+ Attrs: attrs,
+ }
+ return scope.AddOperation(opspec)
+}
+
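[Editorial note: a hedged sketch of driving this wrapper, showing the functional-option pattern used for use_locking. The VarHandleOp handles and constant values below are illustrative assumptions; variable initialization and Session.Run wiring are omitted.]

package main

import (
	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	shape := tf.MakeShape(4)
	// Resource handles for the variable and its two Adadelta slots;
	// SubScope keeps the generated node names unique.
	v := op.VarHandleOp(s.SubScope("var"), tf.Float, shape)
	accum := op.VarHandleOp(s.SubScope("accum"), tf.Float, shape)
	accumUpdate := op.VarHandleOp(s.SubScope("accum_update"), tf.Float, shape)
	lr := op.Const(s.SubScope("lr"), float32(0.01))
	rho := op.Const(s.SubScope("rho"), float32(0.95))
	epsilon := op.Const(s.SubScope("epsilon"), float32(1e-8))
	grad := op.Const(s.SubScope("grad"), []float32{0.1, 0.2})
	indices := op.Const(s.SubScope("indices"), []int32{0, 3})
	// The trailing optional argument sets the use_locking attribute.
	apply := op.ResourceSparseApplyAdadelta(s, v, accum, accumUpdate,
		lr, rho, epsilon, grad, indices,
		op.ResourceSparseApplyAdadeltaUseLocking(true))
	// The op has no outputs; pass the returned *tf.Operation as a
	// target to Session.Run to execute it.
	_ = apply
}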
// Computes rectified linear gradients for a Relu operation.
//
// Arguments:
@@ -12327,34 +12391,6 @@ func FusedResizeAndPadConv2D(scope *Scope, input tf.Output, size tf.Output, padd
return op.Output(0)
}
-// Inverse 3D fast Fourier transform.
-//
-// Computes the inverse 3-dimensional discrete Fourier transform over the
-// inner-most 3 dimensions of `input`.
-//
-// Arguments:
-// input: A complex64 tensor.
-//
-// Returns A complex64 tensor of the same shape as `input`. The inner-most 3
-// dimensions of `input` are replaced with their inverse 3D Fourier transform.
-//
-// @compatibility(numpy)
-// Equivalent to np.fft.ifftn with 3 dimensions.
-// @end_compatibility
-func IFFT3D(scope *Scope, input tf.Output) (output tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "IFFT3D",
- Input: []tf.Input{
- input,
- },
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
// Adds `bias` to `value`.
//
// This is a deprecated version of BiasAdd and will be soon removed.
@@ -19183,6 +19219,34 @@ func Invert(scope *Scope, x tf.Output) (y tf.Output) {
return op.Output(0)
}
+// Inverse 3D fast Fourier transform.
+//
+// Computes the inverse 3-dimensional discrete Fourier transform over the
+// inner-most 3 dimensions of `input`.
+//
+// Arguments:
+// input: A complex64 tensor.
+//
+// Returns A complex64 tensor of the same shape as `input`. The inner-most 3
+// dimensions of `input` are replaced with their inverse 3D Fourier transform.
+//
+// @compatibility(numpy)
+// Equivalent to np.fft.ifftn with 3 dimensions.
+// @end_compatibility
+func IFFT3D(scope *Scope, input tf.Output) (output tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "IFFT3D",
+ Input: []tf.Input{
+ input,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
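[Editorial note: a similarly hedged sketch of the relocated IFFT3D wrapper; the complex64 Placeholder input is assumed for illustration.]

package main

import (
	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// IFFT3D consumes a complex64 tensor and inverse-transforms its
	// inner-most three dimensions.
	spectrum := op.Placeholder(s, tf.Complex64)
	signal := op.IFFT3D(s, spectrum)
	if _, err := s.Finalize(); err != nil {
		panic(err)
	}
	_ = signal
}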
// Deprecated. Disallowed in GraphDef version >= 2.
//
// DEPRECATED at GraphDef version 2: Use AdjustContrastv2 instead
@@ -22625,70 +22689,6 @@ func TensorArrayGradV2(scope *Scope, handle tf.Output, flow_in tf.Output, source
return op.Output(0)
}
-// ResourceSparseApplyAdadeltaAttr is an optional argument to ResourceSparseApplyAdadelta.
-type ResourceSparseApplyAdadeltaAttr func(optionalAttr)
-
-// ResourceSparseApplyAdadeltaUseLocking sets the optional use_locking attribute to value.
-//
-// value: If True, updating of the var and accum tensors will be protected by
-// a lock; otherwise the behavior is undefined, but may exhibit less contention.
-// If not specified, defaults to false
-func ResourceSparseApplyAdadeltaUseLocking(value bool) ResourceSparseApplyAdadeltaAttr {
- return func(m optionalAttr) {
- m["use_locking"] = value
- }
-}
-
-// var: Should be from a Variable().
-//
-// Arguments:
-//
-// accum: Should be from a Variable().
-// accum_update: Should be from a Variable().
-// lr: Learning rate. Must be a scalar.
-// rho: Decay factor. Must be a scalar.
-// epsilon: Constant factor. Must be a scalar.
-// grad: The gradient.
-// indices: A vector of indices into the first dimension of var and accum.
-//
-// Returns the created operation.
-func ResourceSparseApplyAdadelta(scope *Scope, var_ tf.Output, accum tf.Output, accum_update tf.Output, lr tf.Output, rho tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyAdadeltaAttr) (o *tf.Operation) {
- if scope.Err() != nil {
- return
- }
- attrs := map[string]interface{}{}
- for _, a := range optional {
- a(attrs)
- }
- opspec := tf.OpSpec{
- Type: "ResourceSparseApplyAdadelta",
- Input: []tf.Input{
- var_, accum, accum_update, lr, rho, epsilon, grad, indices,
- },
- Attrs: attrs,
- }
- return scope.AddOperation(opspec)
-}
-
-// Identity op for gradient debugging.
-//
-// This op is hidden from public in Python. It is used by TensorFlow Debugger to
-// register gradient tensors for gradient debugging.
-// This op operates on non-reference-type tensors.
-func DebugGradientIdentity(scope *Scope, input tf.Output) (output tf.Output) {
- if scope.Err() != nil {
- return
- }
- opspec := tf.OpSpec{
- Type: "DebugGradientIdentity",
- Input: []tf.Input{
- input,
- },
- }
- op := scope.AddOperation(opspec)
- return op.Output(0)
-}
-
// Return substrings from a `Tensor` of strings.
//
// For each string in the input `Tensor`, creates a substring starting at index