-rw-r--r-- tensorflow/go/genop/internal/genop.go      |  11
-rw-r--r-- tensorflow/go/genop/internal/genop_test.go |   6
-rw-r--r-- tensorflow/go/op/wrappers.go               | 258
3 files changed, 185 insertions(+), 90 deletions(-)
diff --git a/tensorflow/go/genop/internal/genop.go b/tensorflow/go/genop/internal/genop.go
index d9ebec0f8c..16e4d0e512 100644
--- a/tensorflow/go/genop/internal/genop.go
+++ b/tensorflow/go/genop/internal/genop.go
@@ -212,6 +212,10 @@ func {{$.Op.Name}}{{CamelCase .Name}}(value {{GoType .Type}}) {{$.Op.Name}}Attr
 {{- end -}}
 {{- end -}}
+{{- if (not .Op.OutputArg) }}
+//
+// Returns the created operation.
+{{- else }}
 {{- if .DescribeOutputs}}
 //
 {{- if ((len .Op.OutputArg) eq 1) }}
@@ -223,6 +227,7 @@ func {{$.Op.Name}}{{CamelCase .Name}}(value {{GoType .Type}}) {{$.Op.Name}}Attr
 {{- end -}}
 {{- end -}}
 {{- end -}}
+{{- end -}}
 
 {{- /*
 The function signature.
@@ -244,10 +249,12 @@ func {{.Op.Name}}
 {{if .OptionalAttrs}}, optional ...{{.Op.Name}}Attr{{end -}}
 )
-{{- /* Construct outputs: len(OpDef.OutputArg) */ -}}
+{{- /* Construct outputs: len(OpDef.OutputArg) or a *tf.Operation */ -}}
 {{if .Op.OutputArg -}}
 ({{range $i,$a := .Op.OutputArg}}{{if $i}}, {{end}}{{Identifier $a.Name}} {{if IsListArg $a}}[]{{end}}tf.Output{{end -}})
+{{- else -}}
+(o *tf.Operation)
 {{- end }} {
 	if scope.Err() != nil {
 		return
@@ -295,7 +302,7 @@ func {{.Op.Name}}
 	return {{range $i, $a := .Op.OutputArg}}{{if $i}}, {{end}}op.Output({{$i}}){{end}}
 {{- end }}{{- /* if .HasListOutput */}}
 {{- else }}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 {{- end }}{{- /* if .Op.OutputArg */}}
 }
 `))
diff --git a/tensorflow/go/genop/internal/genop_test.go b/tensorflow/go/genop/internal/genop_test.go
index c3057e9119..c66e38fce0 100644
--- a/tensorflow/go/genop/internal/genop_test.go
+++ b/tensorflow/go/genop/internal/genop_test.go
@@ -39,14 +39,16 @@ summary: "No. Op."
 `,
 		wanted: `
 // No. Op.
-func NoOp(scope *Scope) {
+//
+// Returns the created operation.
+func NoOp(scope *Scope) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
 	opspec := tf.OpSpec{
 		Type: "NoOp",
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 `,
 	},
diff --git a/tensorflow/go/op/wrappers.go b/tensorflow/go/op/wrappers.go
index 20a212a44c..4a4d2cbe66 100644
--- a/tensorflow/go/op/wrappers.go
+++ b/tensorflow/go/op/wrappers.go
@@ -64,7 +64,9 @@ func makeOutputList(op *tf.Operation, start int, output string) ([]tf.Output, in
 // resource: Should be from a `Variable` node.
 // indices: A tensor of indices into the first dimension of `ref`.
 // updates: A tensor of updated values to add to `ref`.
-func ResourceScatterAdd(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) {
+//
+// Returns the created operation.
+func ResourceScatterAdd(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -74,7 +76,7 @@ func ResourceScatterAdd(scope *Scope, resource tf.Output, indices tf.Output, upd
 			resource, indices, updates,
 		},
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // Checks whether a resource handle-based variable has been initialized.
@@ -109,7 +111,9 @@ func VarIsInitializedOp(scope *Scope, resource tf.Output) (is_initialized tf.Out
 // Arguments:
 // resource: handle to the resource in which to store the variable.
 // value: the value by which the variable will be incremented.
-func AssignSubVariableOp(scope *Scope, resource tf.Output, value tf.Output) {
+//
+// Returns the created operation.
+func AssignSubVariableOp(scope *Scope, resource tf.Output, value tf.Output) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -119,7 +123,7 @@ func AssignSubVariableOp(scope *Scope, resource tf.Output, value tf.Output) {
 			resource, value,
 		},
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // Adds a value to the current value of a variable.
@@ -133,7 +137,9 @@ func AssignSubVariableOp(scope *Scope, resource tf.Output, value tf.Output) {
 // Arguments:
 // resource: handle to the resource in which to store the variable.
 // value: the value by which the variable will be incremented.
-func AssignAddVariableOp(scope *Scope, resource tf.Output, value tf.Output) {
+//
+// Returns the created operation.
+func AssignAddVariableOp(scope *Scope, resource tf.Output, value tf.Output) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -143,7 +149,7 @@ func AssignAddVariableOp(scope *Scope, resource tf.Output, value tf.Output) {
 			resource, value,
 		},
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // Assigns a new value to a variable.
@@ -154,7 +160,9 @@ func AssignAddVariableOp(scope *Scope, resource tf.Output, value tf.Output) {
 // Arguments:
 // resource: handle to the resource in which to store the variable.
 // value: the value to set the new tensor to use.
-func AssignVariableOp(scope *Scope, resource tf.Output, value tf.Output) {
+//
+// Returns the created operation.
+func AssignVariableOp(scope *Scope, resource tf.Output, value tf.Output) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -164,7 +172,7 @@ func AssignVariableOp(scope *Scope, resource tf.Output, value tf.Output) {
 			resource, value,
 		},
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // VarHandleOpAttr is an optional argument to VarHandleOp.
@@ -3017,7 +3025,9 @@ func AbortExitWithoutError(value bool) AbortAttr {
 // Raise a exception to abort the process when called. If exit_without_error is true, the process will exit normally, otherwise it will exit with a SIGABORT signal.
 //
 // Returns nothing but an exception.
-func Abort(scope *Scope, optional ...AbortAttr) {
+//
+// Returns the created operation.
+func Abort(scope *Scope, optional ...AbortAttr) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -3030,20 +3040,22 @@ func Abort(scope *Scope, optional ...AbortAttr) {
 		Attrs: attrs,
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // Does nothing. Serves as a control trigger for scheduling.
 //
 // Only useful as a placeholder for control edges.
-func ControlTrigger(scope *Scope) {
+//
+// Returns the created operation.
+func ControlTrigger(scope *Scope) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
 	opspec := tf.OpSpec{
 		Type: "ControlTrigger",
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // SpaceToDepth for tensors of type T.
@@ -3508,7 +3520,9 @@ func StageSharedName(value string) StageAttr {
 //
 // Arguments:
 // values: a list of tensors
-func Stage(scope *Scope, values []tf.Output, optional ...StageAttr) {
+//
+// Returns the created operation.
+func Stage(scope *Scope, values []tf.Output, optional ...StageAttr) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -3523,7 +3537,7 @@ func Stage(scope *Scope, values []tf.Output, optional ...StageAttr) {
 		},
 		Attrs: attrs,
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // FakeQuantWithMinMaxArgsAttr is an optional argument to FakeQuantWithMinMaxArgs.
@@ -3684,7 +3698,9 @@ func ResourceGather(scope *Scope, resource tf.Output, indices tf.Output, dtype t
 //
 // Arguments:
 // handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
-func TensorArrayCloseV3(scope *Scope, handle tf.Output) {
+//
+// Returns the created operation.
+func TensorArrayCloseV3(scope *Scope, handle tf.Output) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -3694,7 +3710,7 @@ func TensorArrayCloseV3(scope *Scope, handle tf.Output) {
 			handle,
 		},
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // Get the current size of the TensorArray.
@@ -4140,7 +4156,9 @@ func QueueCloseV2CancelPendingEnqueues(value bool) QueueCloseV2Attr {
 //
 // Arguments:
 // handle: The handle to a queue.
-func QueueCloseV2(scope *Scope, handle tf.Output, optional ...QueueCloseV2Attr) {
+//
+// Returns the created operation.
+func QueueCloseV2(scope *Scope, handle tf.Output, optional ...QueueCloseV2Attr) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -4155,7 +4173,7 @@ func QueueCloseV2(scope *Scope, handle tf.Output, optional ...QueueCloseV2Attr)
 		},
 		Attrs: attrs,
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // Concatenates tensors along one dimension.
@@ -4253,7 +4271,9 @@ func QueueDequeueUpToV2(scope *Scope, handle tf.Output, n tf.Output, component_t
 }
 
 // Deprecated. Use TensorArrayCloseV3
-func TensorArrayCloseV2(scope *Scope, handle tf.Output) {
+//
+// Returns the created operation.
+func TensorArrayCloseV2(scope *Scope, handle tf.Output) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -4263,7 +4283,7 @@ func TensorArrayCloseV2(scope *Scope, handle tf.Output) {
 			handle,
 		},
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // QueueDequeueManyV2Attr is an optional argument to QueueDequeueManyV2.
@@ -4357,7 +4377,9 @@ func QueueEnqueueV2TimeoutMs(value int64) QueueEnqueueV2Attr {
 // Arguments:
 // handle: The handle to a queue.
 // components: One or more tensors from which the enqueued tensors should be taken.
-func QueueEnqueueV2(scope *Scope, handle tf.Output, components []tf.Output, optional ...QueueEnqueueV2Attr) {
+//
+// Returns the created operation.
+func QueueEnqueueV2(scope *Scope, handle tf.Output, components []tf.Output, optional ...QueueEnqueueV2Attr) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -4372,7 +4394,7 @@ func QueueEnqueueV2(scope *Scope, handle tf.Output, components []tf.Output, opti
 		},
 		Attrs: attrs,
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // UnstageAttr is an optional argument to Unstage.
@@ -5787,14 +5809,16 @@ func NextIteration(scope *Scope, data tf.Output) (output tf.Output) {
 }
 
 // Does nothing. Only useful as a placeholder for control edges.
-func NoOp(scope *Scope) {
+//
+// Returns the created operation.
+func NoOp(scope *Scope) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
 	opspec := tf.OpSpec{
 		Type: "NoOp",
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // Computes softsign: `features / (abs(features) + 1)`.
@@ -6276,7 +6300,9 @@ func ResourceApplyAdagradDAUseLocking(value bool) ResourceApplyAdagradDAAttr {
 // l1: L1 regularization. Must be a scalar.
 // l2: L2 regularization. Must be a scalar.
 // global_step: Training step number. Must be a scalar.
-func ResourceApplyAdagradDA(scope *Scope, var_ tf.Output, gradient_accumulator tf.Output, gradient_squared_accumulator tf.Output, grad tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, global_step tf.Output, optional ...ResourceApplyAdagradDAAttr) {
+//
+// Returns the created operation.
+func ResourceApplyAdagradDA(scope *Scope, var_ tf.Output, gradient_accumulator tf.Output, gradient_squared_accumulator tf.Output, grad tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, global_step tf.Output, optional ...ResourceApplyAdagradDAAttr) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -6291,7 +6317,7 @@ func ResourceApplyAdagradDA(scope *Scope, var_ tf.Output, gradient_accumulator t
 		},
 		Attrs: attrs,
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // ComputeAccidentalHitsAttr is an optional argument to ComputeAccidentalHits.
@@ -6465,7 +6491,9 @@ func SegmentMax(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.
 // shapes_and_slices: Shape `[N]`. The shapes and slice specifications to use when
 // saving the tensors.
 // data: `N` tensors to save.
-func SaveSlices(scope *Scope, filename tf.Output, tensor_names tf.Output, shapes_and_slices tf.Output, data []tf.Output) {
+//
+// Returns the created operation.
+func SaveSlices(scope *Scope, filename tf.Output, tensor_names tf.Output, shapes_and_slices tf.Output, data []tf.Output) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -6475,7 +6503,7 @@ func SaveSlices(scope *Scope, filename tf.Output, tensor_names tf.Output, shapes
 			filename, tensor_names, shapes_and_slices, tf.OutputList(data),
 		},
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // TensorArrayGatherV2Attr is an optional argument to TensorArrayGatherV2.
@@ -8172,7 +8200,9 @@ func DestroyResourceOpIgnoreLookupError(value bool) DestroyResourceOpAttr {
 //
 // Arguments:
 // resource: handle to the resource to delete.
-func DestroyResourceOp(scope *Scope, resource tf.Output, optional ...DestroyResourceOpAttr) {
+//
+// Returns the created operation.
+func DestroyResourceOp(scope *Scope, resource tf.Output, optional ...DestroyResourceOpAttr) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -8187,7 +8217,7 @@ func DestroyResourceOp(scope *Scope, resource tf.Output, optional ...DestroyReso
 		},
 		Attrs: attrs,
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // ResourceApplyMomentumAttr is an optional argument to ResourceApplyMomentum.
@@ -8230,7 +8260,9 @@ func ResourceApplyMomentumUseNesterov(value bool) ResourceApplyMomentumAttr {
 // lr: Scaling factor. Must be a scalar.
 // grad: The gradient.
 // momentum: Momentum. Must be a scalar.
-func ResourceApplyMomentum(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, momentum tf.Output, optional ...ResourceApplyMomentumAttr) {
+//
+// Returns the created operation.
+func ResourceApplyMomentum(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, momentum tf.Output, optional ...ResourceApplyMomentumAttr) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -8245,7 +8277,7 @@ func ResourceApplyMomentum(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.
 		},
 		Attrs: attrs,
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // Returns element-wise integer closest to x.
@@ -8798,7 +8830,9 @@ func Cholesky(scope *Scope, input tf.Output) (output tf.Output) {
 // Arguments:
 // filename: scalar. The name of the file to which we write the contents.
 // contents: scalar. The content to be written to the output file.
-func WriteFile(scope *Scope, filename tf.Output, contents tf.Output) {
+//
+// Returns the created operation.
+func WriteFile(scope *Scope, filename tf.Output, contents tf.Output) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -8808,7 +8842,7 @@ func WriteFile(scope *Scope, filename tf.Output, contents tf.Output) {
 			filename, contents,
 		},
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // Reverses specific dimensions of a tensor.
@@ -8925,7 +8959,9 @@ func ResourceApplyCenteredRMSPropUseLocking(value bool) ResourceApplyCenteredRMS
 //
 // epsilon: Ridge term. Must be a scalar.
 // grad: The gradient.
-func ResourceApplyCenteredRMSProp(scope *Scope, var_ tf.Output, mg tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyCenteredRMSPropAttr) {
+//
+// Returns the created operation.
+func ResourceApplyCenteredRMSProp(scope *Scope, var_ tf.Output, mg tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyCenteredRMSPropAttr) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -8940,7 +8976,7 @@ func ResourceApplyCenteredRMSProp(scope *Scope, var_ tf.Output, mg tf.Output, ms
 		},
 		Attrs: attrs,
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // Given a quantized tensor described by (input, input_min, input_max), outputs a
@@ -9100,7 +9136,9 @@ func ResourceApplyProximalGradientDescentUseLocking(value bool) ResourceApplyPro
 // l1: L1 regularization. Must be a scalar.
 // l2: L2 regularization. Must be a scalar.
 // delta: The change.
-func ResourceApplyProximalGradientDescent(scope *Scope, var_ tf.Output, alpha tf.Output, l1 tf.Output, l2 tf.Output, delta tf.Output, optional ...ResourceApplyProximalGradientDescentAttr) {
+//
+// Returns the created operation.
+func ResourceApplyProximalGradientDescent(scope *Scope, var_ tf.Output, alpha tf.Output, l1 tf.Output, l2 tf.Output, delta tf.Output, optional ...ResourceApplyProximalGradientDescentAttr) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -9115,7 +9153,7 @@ func ResourceApplyProximalGradientDescent(scope *Scope, var_ tf.Output, alpha tf
 		},
 		Attrs: attrs,
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // ResourceApplyProximalAdagradAttr is an optional argument to ResourceApplyProximalAdagrad.
@@ -9145,7 +9183,9 @@ func ResourceApplyProximalAdagradUseLocking(value bool) ResourceApplyProximalAda
 // l1: L1 regularization. Must be a scalar.
 // l2: L2 regularization. Must be a scalar.
 // grad: The gradient.
-func ResourceApplyProximalAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, grad tf.Output, optional ...ResourceApplyProximalAdagradAttr) {
+//
+// Returns the created operation.
+func ResourceApplyProximalAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, grad tf.Output, optional ...ResourceApplyProximalAdagradAttr) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -9160,7 +9200,7 @@ func ResourceApplyProximalAdagrad(scope *Scope, var_ tf.Output, accum tf.Output,
 		},
 		Attrs: attrs,
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // Adds Tensor 'bias' to Tensor 'input' for Quantized types.
@@ -9291,7 +9331,9 @@ func ResourceApplyGradientDescentUseLocking(value bool) ResourceApplyGradientDes
 // var_: Should be from a Variable().
 // alpha: Scaling factor. Must be a scalar.
 // delta: The change.
-func ResourceApplyGradientDescent(scope *Scope, var_ tf.Output, alpha tf.Output, delta tf.Output, optional ...ResourceApplyGradientDescentAttr) {
+//
+// Returns the created operation.
+func ResourceApplyGradientDescent(scope *Scope, var_ tf.Output, alpha tf.Output, delta tf.Output, optional ...ResourceApplyGradientDescentAttr) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -9306,7 +9348,7 @@ func ResourceApplyGradientDescent(scope *Scope, var_ tf.Output, alpha tf.Output,
 		},
 		Attrs: attrs,
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // MultinomialAttr is an optional argument to Multinomial.
@@ -9387,7 +9429,9 @@ func ResourceSparseApplyAdagradDAUseLocking(value bool) ResourceSparseApplyAdagr
 // l1: L1 regularization. Must be a scalar.
 // l2: L2 regularization. Must be a scalar.
 // global_step: Training step number. Must be a scalar.
-func ResourceSparseApplyAdagradDA(scope *Scope, var_ tf.Output, gradient_accumulator tf.Output, gradient_squared_accumulator tf.Output, grad tf.Output, indices tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, global_step tf.Output, optional ...ResourceSparseApplyAdagradDAAttr) {
+//
+// Returns the created operation.
+func ResourceSparseApplyAdagradDA(scope *Scope, var_ tf.Output, gradient_accumulator tf.Output, gradient_squared_accumulator tf.Output, grad tf.Output, indices tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, global_step tf.Output, optional ...ResourceSparseApplyAdagradDAAttr) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -9402,7 +9446,7 @@ func ResourceSparseApplyAdagradDA(scope *Scope, var_ tf.Output, gradient_accumul
 		},
 		Attrs: attrs,
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // SparseToSparseSetOperationAttr is an optional argument to SparseToSparseSetOperation.
@@ -9653,7 +9697,9 @@ func ResourceSparseApplyCenteredRMSPropUseLocking(value bool) ResourceSparseAppl
 // epsilon: Ridge term. Must be a scalar.
 // grad: The gradient.
 // indices: A vector of indices into the first dimension of var, ms and mom.
-func ResourceSparseApplyCenteredRMSProp(scope *Scope, var_ tf.Output, mg tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyCenteredRMSPropAttr) {
+//
+// Returns the created operation.
+func ResourceSparseApplyCenteredRMSProp(scope *Scope, var_ tf.Output, mg tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyCenteredRMSPropAttr) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -9668,7 +9714,7 @@ func ResourceSparseApplyCenteredRMSProp(scope *Scope, var_ tf.Outp
 		},
 		Attrs: attrs,
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // Computes the mean along segments of a tensor.
@@ -10261,7 +10307,9 @@ func ResourceApplyAdagradUseLocking(value bool) ResourceApplyAdagradAttr {
 // accum: Should be from a Variable().
 // lr: Scaling factor. Must be a scalar.
 // grad: The gradient.
-func ResourceApplyAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, optional ...ResourceApplyAdagradAttr) {
+//
+// Returns the created operation.
+func ResourceApplyAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, optional ...ResourceApplyAdagradAttr) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -10276,7 +10324,7 @@ func ResourceApplyAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.O
 		},
 		Attrs: attrs,
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // Converts each string in the input Tensor to its hash mod by a number of buckets.
@@ -10595,7 +10643,9 @@ func ResourceSparseApplyRMSPropUseLocking(value bool) ResourceSparseApplyRMSProp
 // epsilon: Ridge term. Must be a scalar.
 // grad: The gradient.
 // indices: A vector of indices into the first dimension of var, ms and mom.
-func ResourceSparseApplyRMSProp(scope *Scope, var_ tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyRMSPropAttr) {
+//
+// Returns the created operation.
+func ResourceSparseApplyRMSProp(scope *Scope, var_ tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyRMSPropAttr) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -10610,7 +10660,7 @@ func ResourceSparseApplyRMSProp(scope *Scope, var_ tf.Output, ms tf.Output, mom
 		},
 		Attrs: attrs,
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // QuantizeV2Attr is an optional argument to QuantizeV2.
@@ -10787,7 +10837,9 @@ func ResourceSparseApplyFtrlUseLocking(value bool) ResourceSparseApplyFtrlAttr {
 // l1: L1 regularization. Must be a scalar.
 // l2: L2 regularization. Must be a scalar.
 // lr_power: Scaling factor. Must be a scalar.
-func ResourceSparseApplyFtrl(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, indices tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, lr_power tf.Output, optional ...ResourceSparseApplyFtrlAttr) {
+//
+// Returns the created operation.
+func ResourceSparseApplyFtrl(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, indices tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, lr_power tf.Output, optional ...ResourceSparseApplyFtrlAttr) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -10802,7 +10854,7 @@ func ResourceSparseApplyFtrl(scope *Scope, var_ tf.Output, accum tf.Output, line
 		},
 		Attrs: attrs,
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // Computes a 3-D convolution given 5-D `input` and `filter` tensors.
@@ -11142,7 +11194,9 @@ func ResourceSparseApplyProximalAdagradUseLocking(value bool) ResourceSparseAppl
 // l2: L2 regularization. Must be a scalar.
 // grad: The gradient.
 // indices: A vector of indices into the first dimension of var and accum.
-func ResourceSparseApplyProximalAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyProximalAdagradAttr) {
+//
+// Returns the created operation.
+func ResourceSparseApplyProximalAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyProximalAdagradAttr) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -11157,7 +11211,7 @@ func ResourceSparseApplyProximalAdagrad(scope *Scope, var_ tf.Output, accum tf.O
 		},
 		Attrs: attrs,
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // Store the input tensor in the state of the current session.
@@ -11480,7 +11534,9 @@ func ResourceApplyRMSPropUseLocking(value bool) ResourceApplyRMSPropAttr {
 //
 // epsilon: Ridge term. Must be a scalar.
 // grad: The gradient.
-func ResourceApplyRMSProp(scope *Scope, var_ tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyRMSPropAttr) {
+//
+// Returns the created operation.
+func ResourceApplyRMSProp(scope *Scope, var_ tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyRMSPropAttr) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -11495,7 +11551,7 @@ func ResourceApplyRMSProp(scope *Scope, var_ tf.Output, ms tf.Output, mom tf.Out
 		},
 		Attrs: attrs,
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // Output a fact about factorials.
@@ -11676,7 +11732,9 @@ func ResourceSparseApplyAdagradUseLocking(value bool) ResourceSparseApplyAdagrad
 // lr: Learning rate. Must be a scalar.
 // grad: The gradient.
 // indices: A vector of indices into the first dimension of var and accum.
-func ResourceSparseApplyAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyAdagradAttr) {
+//
+// Returns the created operation.
+func ResourceSparseApplyAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyAdagradAttr) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -11691,7 +11749,7 @@ func ResourceSparseApplyAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, l
 		},
 		Attrs: attrs,
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // LRNGradAttr is an optional argument to LRNGrad.
@@ -12439,7 +12497,9 @@ func MergeV2CheckpointsDeleteOldDirs(value bool) MergeV2CheckpointsAttr {
 // checkpoint_prefixes: prefixes of V2 checkpoints to merge.
 // destination_prefix: scalar. The desired final prefix. Allowed to be the same
 // as one of the checkpoint_prefixes.
-func MergeV2Checkpoints(scope *Scope, checkpoint_prefixes tf.Output, destination_prefix tf.Output, optional ...MergeV2CheckpointsAttr) {
+//
+// Returns the created operation.
+func MergeV2Checkpoints(scope *Scope, checkpoint_prefixes tf.Output, destination_prefix tf.Output, optional ...MergeV2CheckpointsAttr) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -12454,7 +12514,7 @@ func MergeV2Checkpoints(scope *Scope, checkpoint_prefixes tf.Output, destination
 		},
 		Attrs: attrs,
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // Reads the value of a variable.
@@ -12629,7 +12689,9 @@ func Abs(scope *Scope, x tf.Output) (y tf.Output) {
 // reader_handle: Handle to a Reader.
 // state: Result of a ReaderSerializeState of a Reader with type
 // matching reader_handle.
-func ReaderRestoreStateV2(scope *Scope, reader_handle tf.Output, state tf.Output) {
+//
+// Returns the created operation.
+func ReaderRestoreStateV2(scope *Scope, reader_handle tf.Output, state tf.Output) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -12639,7 +12701,7 @@ func ReaderRestoreStateV2(scope *Scope, reader_handle tf.Output, state tf.Output
 			reader_handle, state,
 		},
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // Draw bounding boxes on a batch of images.
@@ -12704,7 +12766,9 @@ func ResourceSparseApplyProximalGradientDescentUseLocking(value bool) ResourceSp
 // l2: L2 regularization. Must be a scalar.
 // grad: The gradient.
 // indices: A vector of indices into the first dimension of var and accum.
-func ResourceSparseApplyProximalGradientDescent(scope *Scope, var_ tf.Output, alpha tf.Output, l1 tf.Output, l2 tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyProximalGradientDescentAttr) {
+//
+// Returns the created operation.
+func ResourceSparseApplyProximalGradientDescent(scope *Scope, var_ tf.Output, alpha tf.Output, l1 tf.Output, l2 tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyProximalGradientDescentAttr) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -12719,7 +12783,7 @@ func ResourceSparseApplyProximalGradientDescent(scope *Scope, var_ tf.Output, al
 		},
 		Attrs: attrs,
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // UnpackAttr is an optional argument to Unpack.
@@ -13078,7 +13142,9 @@ func ResourceApplyFtrlUseLocking(value bool) ResourceApplyFtrlAttr {
 // l1: L1 regulariation. Must be a scalar.
 // l2: L2 regulariation. Must be a scalar.
 // lr_power: Scaling factor. Must be a scalar.
-func ResourceApplyFtrl(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, lr_power tf.Output, optional ...ResourceApplyFtrlAttr) {
+//
+// Returns the created operation.
+func ResourceApplyFtrl(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, lr_power tf.Output, optional ...ResourceApplyFtrlAttr) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -13093,7 +13159,7 @@ func ResourceApplyFtrl(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.
 		},
 		Attrs: attrs,
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // AnyAttr is an optional argument to Any.
@@ -13279,7 +13345,9 @@ func TopKV2(scope *Scope, input tf.Output, k tf.Output, optional ...TopKV2Attr)
 //
 // Arguments:
 // handle: The handle for a tensor stored in the session state.
-func DeleteSessionTensor(scope *Scope, handle tf.Output) {
+//
+// Returns the created operation.
+func DeleteSessionTensor(scope *Scope, handle tf.Output) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -13289,7 +13357,7 @@ func DeleteSessionTensor(scope *Scope, handle tf.Output) {
 			handle,
 		},
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // DenseToDenseSetOperationAttr is an optional argument to DenseToDenseSetOperation.
@@ -13794,7 +13862,9 @@ func ResourceApplyAdadeltaUseLocking(value bool) ResourceApplyAdadeltaAttr {
 // rho: Decay factor. Must be a scalar.
 // epsilon: Constant factor. Must be a scalar.
 // grad: The gradient.
-func ResourceApplyAdadelta(scope *Scope, var_ tf.Output, accum tf.Output, accum_update tf.Output, lr tf.Output, rho tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyAdadeltaAttr) {
+//
+// Returns the created operation.
+func ResourceApplyAdadelta(scope *Scope, var_ tf.Output, accum tf.Output, accum_update tf.Output, lr tf.Output, rho tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyAdadeltaAttr) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -13809,7 +13879,7 @@ func ResourceApplyAdadelta(scope *Scope, var_ tf.Output, accum tf.Output, accum_
 		},
 		Attrs: attrs,
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // Shuffle dimensions of x according to a permutation.
@@ -14026,7 +14096,9 @@ func QuantizedBatchNormWithGlobalNormalization(scope *Scope, t tf.Output, t_min
 // the tensor.
 // tensor_names: Shape `[N]`. The names of the tensors to be saved.
 // data: `N` tensors to save.
-func Save(scope *Scope, filename tf.Output, tensor_names tf.Output, data []tf.Output) {
+//
+// Returns the created operation.
+func Save(scope *Scope, filename tf.Output, tensor_names tf.Output, data []tf.Output) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -14036,7 +14108,7 @@ func Save(scope *Scope, filename tf.Output, tensor_names tf.Output, data []tf.Ou
 			filename, tensor_names, tf.OutputList(data),
 		},
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // Generate a glob pattern matching all sharded file names.
@@ -14138,7 +14210,9 @@ func ResourceApplyAdamUseLocking(value bool) ResourceApplyAdamAttr {
 // beta2: Momentum factor. Must be a scalar.
 // epsilon: Ridge term. Must be a scalar.
 // grad: The gradient.
-func ResourceApplyAdam(scope *Scope, var_ tf.Output, m tf.Output, v tf.Output, beta1_power tf.Output, beta2_power tf.Output, lr tf.Output, beta1 tf.Output, beta2 tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyAdamAttr) {
+//
+// Returns the created operation.
+func ResourceApplyAdam(scope *Scope, var_ tf.Output, m tf.Output, v tf.Output, beta1_power tf.Output, beta2_power tf.Output, lr tf.Output, beta1 tf.Output, beta2 tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyAdamAttr) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -14153,7 +14227,7 @@ func ResourceApplyAdam(scope *Scope, var_ tf.Output, m tf.Output, v tf.Output, b
 		},
 		Attrs: attrs,
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // Constructs a tensor by tiling a given tensor.
@@ -15627,7 +15701,9 @@ func ResourceSparseApplyAdadeltaUseLocking(value bool) ResourceSparseApplyAdadel
 // epsilon: Constant factor. Must be a scalar.
 // grad: The gradient.
 // indices: A vector of indices into the first dimension of var and accum.
-func ResourceSparseApplyAdadelta(scope *Scope, var_ tf.Output, accum tf.Output, accum_update tf.Output, lr tf.Output, rho tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyAdadeltaAttr) {
+//
+// Returns the created operation.
+func ResourceSparseApplyAdadelta(scope *Scope, var_ tf.Output, accum tf.Output, accum_update tf.Output, lr tf.Output, rho tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyAdadeltaAttr) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -15642,7 +15718,7 @@ func ResourceSparseApplyAdadelta(scope *Scope, var_ tf.Output, accum tf.Output,
 		},
 		Attrs: attrs,
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // Returns which elements of x are NaN.
@@ -16106,7 +16182,9 @@ func AssertSummarize(value int64) AssertAttr {
 // Arguments:
 // condition: The condition to evaluate.
 // data: The tensors to print out when condition is false.
-func Assert(scope *Scope, condition tf.Output, data []tf.Output, optional ...AssertAttr) {
+//
+// Returns the created operation.
+func Assert(scope *Scope, condition tf.Output, data []tf.Output, optional ...AssertAttr) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -16121,7 +16199,7 @@ func Assert(scope *Scope, condition tf.Output, data []tf.Output, optional ...Ass
 		},
 		Attrs: attrs,
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // Computes the power of one value to another.
@@ -17375,7 +17453,9 @@ func ResourceSparseApplyMomentumUseNesterov(value bool) ResourceSparseApplyMomen
 // grad: The gradient.
 // indices: A vector of indices into the first dimension of var and accum.
 // momentum: Momentum. Must be a scalar.
-func ResourceSparseApplyMomentum(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, indices tf.Output, momentum tf.Output, optional ...ResourceSparseApplyMomentumAttr) {
+//
+// Returns the created operation.
+func ResourceSparseApplyMomentum(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, indices tf.Output, momentum tf.Output, optional ...ResourceSparseApplyMomentumAttr) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -17390,7 +17470,7 @@ func ResourceSparseApplyMomentum(scope *Scope, var_ tf.Output, accum tf.Output,
 		},
 		Attrs: attrs,
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // Returns the complex conjugate of a complex number.
@@ -17803,7 +17883,9 @@ func QueueEnqueueManyV2TimeoutMs(value int64) QueueEnqueueManyV2Attr {
 // handle: The handle to a queue.
 // components: One or more tensors from which the enqueued tensors should
 // be taken.
-func QueueEnqueueManyV2(scope *Scope, handle tf.Output, components []tf.Output, optional ...QueueEnqueueManyV2Attr) {
+//
+// Returns the created operation.
+func QueueEnqueueManyV2(scope *Scope, handle tf.Output, components []tf.Output, optional ...QueueEnqueueManyV2Attr) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -17818,7 +17900,7 @@ func QueueEnqueueManyV2(scope *Scope, handle tf.Output, components []tf.Output,
 		},
 		Attrs: attrs,
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // Forwards the input to the output.
@@ -18444,7 +18526,9 @@ func MatrixSolve(scope *Scope, matrix tf.Output, rhs tf.Output, optional ...Matr
 // shape_and_slices: shape {N}. The slice specs of the tensors to be saved.
 // Empty strings indicate that they are non-partitioned tensors.
 // tensors: `N` tensors to save.
-func SaveV2(scope *Scope, prefix tf.Output, tensor_names tf.Output, shape_and_slices tf.Output, tensors []tf.Output) {
+//
+// Returns the created operation.
+func SaveV2(scope *Scope, prefix tf.Output, tensor_names tf.Output, shape_and_slices tf.Output, tensors []tf.Output) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -18454,7 +18538,7 @@ func SaveV2(scope *Scope, prefix tf.Output, tensor_names tf.Output, shape_and_sl
 			prefix, tensor_names, shape_and_slices, tf.OutputList(tensors),
 		},
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // MatrixTriangularSolveAttr is an optional argument to MatrixTriangularSolve.
@@ -19565,7 +19649,9 @@ func AdjustContrast(scope *Scope, images tf.Output, contrast_factor tf.Output, m
 //
 // Arguments:
 // reader_handle: Handle to a Reader.
-func ReaderResetV2(scope *Scope, reader_handle tf.Output) {
+//
+// Returns the created operation.
+func ReaderResetV2(scope *Scope, reader_handle tf.Output) (o *tf.Operation) {
 	if scope.Err() != nil {
 		return
 	}
@@ -19575,7 +19661,7 @@ func ReaderResetV2(scope *Scope, reader_handle tf.Output) {
 			reader_handle,
 		},
 	}
-	scope.AddOperation(opspec)
+	return scope.AddOperation(opspec)
 }
 
 // Adjust the hue of one or more images.
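
Editor's note, not part of the patch: the practical effect of this change is that wrappers for ops with no outputs (NoOp, AssignVariableOp, Save, Assert, and the others above) now hand back the created *tf.Operation, which a caller can pass to Session.Run as an explicit target. A minimal sketch, assuming the tensorflow/go API of this era; the variable setup (VarHandleOp, Const) is illustrative:

package main

import (
	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// Build a scalar float variable and an op that assigns it a value.
	v := op.VarHandleOp(s, tf.Float, tf.ScalarShape())
	zero := op.Const(s, float32(0))
	// Before this change AssignVariableOp returned nothing; now the
	// created operation can be kept and run as a target.
	init := op.AssignVariableOp(s, v, zero)

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()
	// Run the op purely for its side effect: no feeds, no fetches,
	// just the operation as a run target.
	if _, err := sess.Run(nil, nil, []*tf.Operation{init}); err != nil {
		panic(err)
	}
}

Without the return value there was no handle to such side-effect-only operations short of looking them up by name in the graph, so they could not easily be used as run targets or control dependencies.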