From b42e7926f78dcb14fdb2169d106fc7a923acca78 Mon Sep 17 00:00:00 2001
From: "A. Unique TensorFlower"
Date: Thu, 2 Aug 2018 14:27:04 -0700
Subject: Go: Update generated wrapper functions for TensorFlow ops.

PiperOrigin-RevId: 207165297
---
 tensorflow/go/op/wrappers.go | 166 +++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 161 insertions(+), 5 deletions(-)

(limited to 'tensorflow/go')

diff --git a/tensorflow/go/op/wrappers.go b/tensorflow/go/op/wrappers.go
index b81e0da07c..ca1521e641 100644
--- a/tensorflow/go/op/wrappers.go
+++ b/tensorflow/go/op/wrappers.go
@@ -334,8 +334,12 @@ func FakeQuantWithMinMaxArgs(scope *Scope, inputs tf.Output, optional ...FakeQua
 // the given `shape` according to indices. This operator is the inverse of the
 // @{tf.gather_nd} operator which extracts values or slices from a given tensor.
 //
+// If `indices` contains duplicates, then their updates are accumulated (summed).
+//
 // **WARNING**: The order in which updates are applied is nondeterministic, so the
-// output will be nondeterministic if `indices` contains duplicates.
+// output will be nondeterministic if `indices` contains duplicates -- because
+// of some numerical approximation issues, numbers summed in different order
+// may yield different results.
 //
 // `indices` is an integer tensor containing indices into a new tensor of shape
 // `shape`. The last dimension of `indices` can be at most the rank of `shape`:
@@ -9735,7 +9739,7 @@ func ResourceScatterNdAddUseLocking(value bool) ResourceScatterNdAddAttr {
 // 8 elements. In Python, that update would look like this:
 //
 // ```python
-// ref = tfe.Variable([1, 2, 3, 4, 5, 6, 7, 8])
+// ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)
 // indices = tf.constant([[4], [3], [1] ,[7]])
 // updates = tf.constant([9, 10, 11, 12])
 // update = tf.scatter_nd_add(ref, indices, updates)
@@ -12786,7 +12790,7 @@ func ResourceScatterNdUpdateUseLocking(value bool) ResourceScatterNdUpdateAttr {
 // 8 elements. In Python, that update would look like this:
 //
 // ```python
-// ref = tfe.Variable([1, 2, 3, 4, 5, 6, 7, 8])
+// ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
 // indices = tf.constant([[4], [3], [1] ,[7]])
 // updates = tf.constant([9, 10, 11, 12])
 // update = tf.scatter_nd_update(ref, indices, updates)
@@ -21968,7 +21972,7 @@ func PaddedBatchDatasetV2(scope *Scope, input_dataset tf.Output, batch_size tf.O
 	return op.Output(0)
 }
 
-// Returns element-wise smallest integer in not less than x.
+// Returns element-wise smallest integer not less than x.
 func Ceil(scope *Scope, x tf.Output) (y tf.Output) {
 	if scope.Err() != nil {
 		return
@@ -25884,6 +25888,73 @@ func NonMaxSuppressionV3(scope *Scope, boxes tf.Output, scores tf.Output, max_ou
 	return op.Output(0)
 }
 
+// NonMaxSuppressionV4Attr is an optional argument to NonMaxSuppressionV4.
+type NonMaxSuppressionV4Attr func(optionalAttr)
+
+// NonMaxSuppressionV4PadToMaxOutputSize sets the optional pad_to_max_output_size attribute to value.
+//
+// value: If true, the output `selected_indices` is padded to be of length
+// `max_output_size`. Defaults to false.
+// If not specified, defaults to false
+func NonMaxSuppressionV4PadToMaxOutputSize(value bool) NonMaxSuppressionV4Attr {
+	return func(m optionalAttr) {
+		m["pad_to_max_output_size"] = value
+	}
+}
+
+// Greedily selects a subset of bounding boxes in descending order of score,
+//
+// pruning away boxes that have high intersection-over-union (IOU) overlap
+// with previously selected boxes. Bounding boxes with score less than
+// `score_threshold` are removed. Bounding boxes are supplied as
+// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
+// diagonal pair of box corners and the coordinates can be provided as normalized
+// (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm
+// is agnostic to where the origin is in the coordinate system and more
+// generally is invariant to orthogonal transformations and translations
+// of the coordinate system; thus translating or reflections of the coordinate
+// system result in the same boxes being selected by the algorithm.
+// The output of this operation is a set of integers indexing into the input
+// collection of bounding boxes representing the selected boxes. The bounding
+// box coordinates corresponding to the selected indices can then be obtained
+// using the `tf.gather operation`. For example:
+//   selected_indices = tf.image.non_max_suppression_v2(
+//       boxes, scores, max_output_size, iou_threshold, score_threshold)
+//   selected_boxes = tf.gather(boxes, selected_indices)
+//
+// Arguments:
+//	boxes: A 2-D float tensor of shape `[num_boxes, 4]`.
+//	scores: A 1-D float tensor of shape `[num_boxes]` representing a single
+// score corresponding to each box (each row of boxes).
+//	max_output_size: A scalar integer tensor representing the maximum number of
+// boxes to be selected by non max suppression.
+//	iou_threshold: A 0-D float tensor representing the threshold for deciding whether
+// boxes overlap too much with respect to IOU.
+//	score_threshold: A 0-D float tensor representing the threshold for deciding when to remove
+// boxes based on score.
+//
+// Returns A 1-D integer tensor of shape `[M]` representing the selected
+// indices from the boxes tensor, where `M <= max_output_size`.A 0-D integer tensor representing the number of valid elements in
+// `selected_indices`, with the valid elements appearing first.
+func NonMaxSuppressionV4(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, iou_threshold tf.Output, score_threshold tf.Output, optional ...NonMaxSuppressionV4Attr) (selected_indices tf.Output, valid_outputs tf.Output) {
+	if scope.Err() != nil {
+		return
+	}
+	attrs := map[string]interface{}{}
+	for _, a := range optional {
+		a(attrs)
+	}
+	opspec := tf.OpSpec{
+		Type: "NonMaxSuppressionV4",
+		Input: []tf.Input{
+			boxes, scores, max_output_size, iou_threshold, score_threshold,
+		},
+		Attrs: attrs,
+	}
+	op := scope.AddOperation(opspec)
+	return op.Output(0), op.Output(1)
+}
+
 // Computes the matrix logarithm of one or more square matrices:
 //
 //
@@ -27342,7 +27413,7 @@ func AdjustContrastv2(scope *Scope, images tf.Output, contrast_factor tf.Output)
 	return op.Output(0)
 }
 
-// Gets the next output from the given iterator.
+// Gets the next output from the given iterator .
 func IteratorGetNext(scope *Scope, iterator tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output) {
 	if scope.Err() != nil {
 		return
@@ -27750,6 +27821,21 @@ func SinkDataset(scope *Scope, input_dataset tf.Output) (handle tf.Output) {
 	return op.Output(0)
 }
 
+// Constructs an Optional variant from a tuple of tensors.
+func OptionalFromValue(scope *Scope, components []tf.Output) (optional tf.Output) {
+	if scope.Err() != nil {
+		return
+	}
+	opspec := tf.OpSpec{
+		Type: "OptionalFromValue",
+		Input: []tf.Input{
+			tf.OutputList(components),
+		},
+	}
+	op := scope.AddOperation(opspec)
+	return op.Output(0)
+}
+
 // DecodeProtoV2Attr is an optional argument to DecodeProtoV2.
 type DecodeProtoV2Attr func(optionalAttr)
 
@@ -27876,6 +27962,33 @@ func DecodeProtoV2(scope *Scope, bytes tf.Output, message_type string, field_nam
 	return sizes, values
 }
 
+// Creates an Optional variant with no value.
+func OptionalNone(scope *Scope) (optional tf.Output) {
+	if scope.Err() != nil {
+		return
+	}
+	opspec := tf.OpSpec{
+		Type: "OptionalNone",
+	}
+	op := scope.AddOperation(opspec)
+	return op.Output(0)
+}
+
+// Returns true if and only if the given Optional variant has a value.
+func OptionalHasValue(scope *Scope, optional tf.Output) (has_value tf.Output) {
+	if scope.Err() != nil {
+		return
+	}
+	opspec := tf.OpSpec{
+		Type: "OptionalHasValue",
+		Input: []tf.Input{
+			optional,
+		},
+	}
+	op := scope.AddOperation(opspec)
+	return op.Output(0)
+}
+
 // Creates a dataset that executes a SQL query and emits rows of the result set.
 //
 // Arguments:
@@ -27900,6 +28013,49 @@ func SqlDataset(scope *Scope, driver_name tf.Output, data_source_name tf.Output,
 	return op.Output(0)
 }
 
+// Returns the value stored in an Optional variant or raises an error if none exists.
+func OptionalGetValue(scope *Scope, optional tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output) {
+	if scope.Err() != nil {
+		return
+	}
+	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
+	opspec := tf.OpSpec{
+		Type: "OptionalGetValue",
+		Input: []tf.Input{
+			optional,
+		},
+		Attrs: attrs,
+	}
+	op := scope.AddOperation(opspec)
+	if scope.Err() != nil {
+		return
+	}
+	var idx int
+	var err error
+	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
+		scope.UpdateErr("OptionalGetValue", err)
+		return
+	}
+	return components
+}
+
+// Gets the next output from the given iterator as an Optional variant.
+func IteratorGetNextAsOptional(scope *Scope, iterator tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (optional tf.Output) {
+	if scope.Err() != nil {
+		return
+	}
+	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
+	opspec := tf.OpSpec{
+		Type: "IteratorGetNextAsOptional",
+		Input: []tf.Input{
+			iterator,
+		},
+		Attrs: attrs,
+	}
+	op := scope.AddOperation(opspec)
+	return op.Output(0)
+}
+
 // Performs a padding as a preprocess during a convolution.
 //
 // Similar to FusedResizeAndPadConv2d, this op allows for an optimized
--
cgit v1.2.3
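
Note: a minimal sketch of how the new NonMaxSuppressionV4 wrapper could be driven from the Go bindings follows. It is not part of the patch; it assumes the usual tensorflow/go and tensorflow/go/op import paths from this repository, and the box coordinates, scores, and thresholds are illustrative values only.

```go
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()

	// Two heavily overlapping boxes; NMS should keep only the higher-scoring one.
	boxes := op.Const(s, [][]float32{
		{0, 0, 1, 1},
		{0, 0.1, 1, 1.1},
	})
	scores := op.Const(s, []float32{0.9, 0.75})
	maxOutputSize := op.Const(s, int32(2))
	iouThreshold := op.Const(s, float32(0.5))
	scoreThreshold := op.Const(s, float32(0.0))

	// pad_to_max_output_size=true pads selected_indices out to max_output_size;
	// valid_outputs reports how many leading entries are real selections.
	selected, valid := op.NonMaxSuppressionV4(s, boxes, scores, maxOutputSize, iouThreshold, scoreThreshold,
		op.NonMaxSuppressionV4PadToMaxOutputSize(true))

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()

	out, err := sess.Run(nil, []tf.Output{selected, valid}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("selected_indices:", out[0].Value()) // e.g. [0 0], padded to length 2
	fmt.Println("valid_outputs:", out[1].Value())    // e.g. 1
}
```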
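
The new Optional wrappers (OptionalFromValue, OptionalHasValue, OptionalGetValue, OptionalNone, IteratorGetNextAsOptional) compose in the obvious way. The sketch below, again assuming the standard Go bindings and using made-up values, wraps a tuple of tensors in an Optional variant and unpacks it; an Optional produced by IteratorGetNextAsOptional would be consumed the same way, with OptionalHasValue signalling whether the iterator still had an element.

```go
package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()

	// Wrap a one-element tuple of tensors in an Optional variant.
	val := op.Const(s, []int32{1, 2, 3})
	opt := op.OptionalFromValue(s, []tf.Output{val})

	// Query whether the Optional holds a value, then unpack its components.
	// output_types and output_shapes must describe the wrapped tuple.
	has := op.OptionalHasValue(s, opt)
	components := op.OptionalGetValue(s, opt,
		[]tf.DataType{tf.Int32},
		[]tf.Shape{tf.MakeShape(3)})

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()

	fetches := append([]tf.Output{has}, components...)
	out, err := sess.Run(nil, fetches, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("has_value:", out[0].Value())  // true
	fmt.Println("components:", out[1].Value()) // [1 2 3]
}
```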