path: root/tensorflow/core/ops/ctc_ops.cc
author    Anna R <annarev@google.com> 2018-01-03 07:54:54 -0800
committer TensorFlower Gardener <gardener@tensorflow.org> 2018-01-03 07:58:09 -0800
commit    ca19540ebdb827c9ac9a237bde97065e787dbe4f (patch)
tree      b54019c962d8ee95fefe6165d58a01dcc4cb2de5 /tensorflow/core/ops/ctc_ops.cc
parent    961be409bbb0d3febf8a1005e67cb6750b75806d (diff)
Removing doc strings from REGISTER_OP calls in core/ops.
PiperOrigin-RevId: 180670333
Diffstat (limited to 'tensorflow/core/ops/ctc_ops.cc')
-rw-r--r--  tensorflow/core/ops/ctc_ops.cc | 80
1 file changed, 3 insertions(+), 77 deletions(-)
diff --git a/tensorflow/core/ops/ctc_ops.cc b/tensorflow/core/ops/ctc_ops.cc
index 1a69106d80..f2322c730b 100644
--- a/tensorflow/core/ops/ctc_ops.cc
+++ b/tensorflow/core/ops/ctc_ops.cc
@@ -59,30 +59,7 @@ REGISTER_OP("CTCLoss")
c->set_output(0, c->Vector(batch_size));
c->set_output(1, inputs);
return Status::OK();
- })
- .Doc(R"doc(
-Calculates the CTC Loss (log probability) for each batch entry. Also calculates
-the gradient. This class performs the softmax operation for you, so inputs
-should be e.g. linear projections of outputs by an LSTM.
-
-inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
-labels_indices: The indices of a `SparseTensor<int32, 2>`.
- `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for
- `(batch b, time t)`.
-labels_values: The values (labels) associated with the given batch and time.
-sequence_length: A vector containing sequence lengths (batch).
-preprocess_collapse_repeated: Scalar, if true then repeated labels are
- collapsed prior to the CTC calculation.
-ctc_merge_repeated: Scalar. If set to false, *during* CTC calculation
- repeated non-blank labels will not be merged and are interpreted as
- individual labels. This is a simplified version of CTC.
-ignore_longer_outputs_than_inputs: Scalar. If set to true, during CTC
- calculation, items that have longer output sequences than input sequences
- are skipped: they don't contribute to the loss term and have zero-gradient.
-loss: A vector (batch) containing log-probabilities.
-gradient: The gradient of `loss`. 3-D, shape:
- `(max_time x batch_size x num_classes)`.
-)doc");
+ });
REGISTER_OP("CTCGreedyDecoder")
.Input("inputs: float")
@@ -110,32 +87,7 @@ REGISTER_OP("CTCGreedyDecoder")
c->set_output(2, c->Vector(2));
c->set_output(3, c->Matrix(batch_size, 1));
return Status::OK();
- })
- .Doc(R"doc(
-Performs greedy decoding on the logits given in inputs.
-
-A note about the attribute merge_repeated: if enabled, when
-consecutive logits' maximum indices are the same, only the first of
-these is emitted. Labeling the blank '*', the sequence "A B B * B B"
-becomes "A B B" if merge_repeated = True and "A B B B B" if
-merge_repeated = False.
-
-Regardless of the value of merge_repeated, if the maximum index of a given
-time and batch corresponds to the blank, index `(num_classes - 1)`, no new
-element is emitted.
-
-inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
-sequence_length: A vector containing sequence lengths, size `(batch_size)`.
-merge_repeated: If True, merge repeated classes in output.
-decoded_indices: Indices matrix, size `(total_decoded_outputs x 2)`,
- of a `SparseTensor<int64, 2>`. The rows store: [batch, time].
-decoded_values: Values vector, size: `(total_decoded_outputs)`,
- of a `SparseTensor<int64, 2>`. The vector stores the decoded classes.
-decoded_shape: Shape vector, size `(2)`, of the decoded SparseTensor.
- Values are: `[batch_size, max_decoded_length]`.
-log_probability: Matrix, size `(batch_size x 1)`, containing sequence
- log-probabilities.
-)doc");
+ });
REGISTER_OP("CTCBeamSearchDecoder")
.Input("inputs: float")
@@ -176,32 +128,6 @@ REGISTER_OP("CTCBeamSearchDecoder")
}
c->set_output(out_idx++, c->Matrix(batch_size, top_paths));
return Status::OK();
- })
- .Doc(R"doc(
-Performs beam search decoding on the logits given in input.
-
-A note about the attribute merge_repeated: For the beam search decoder,
-this means that if consecutive entries in a beam are the same, only
-the first of these is emitted. That is, when the top path is "A B B B B",
-"A B" is returned if merge_repeated = True but "A B B B B" is
-returned if merge_repeated = False.
-
-inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
-sequence_length: A vector containing sequence lengths, size `(batch)`.
-beam_width: A scalar >= 0 (beam search beam width).
-top_paths: A scalar >= 0, <= beam_width (controls output size).
-merge_repeated: If true, merge repeated classes in output.
-decoded_indices: A list (length: top_paths) of indices matrices. Matrix j,
- size `(total_decoded_outputs[j] x 2)`, has indices of a
- `SparseTensor<int64, 2>`. The rows store: [batch, time].
-decoded_values: A list (length: top_paths) of values vectors. Vector j,
- size `(length total_decoded_outputs[j])`, has the values of a
- `SparseTensor<int64, 2>`. The vector stores the decoded classes for beam j.
-decoded_shape: A list (length: top_paths) of shape vector. Vector j,
- size `(2)`, stores the shape of the decoded `SparseTensor[j]`.
- Its values are: `[batch_size, max_decoded_length[j]]`.
-log_probability: A matrix, shaped: `(batch_size x top_paths)`. The
- sequence log-probabilities.
-)doc");
+ });
} // namespace tensorflow
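
Finally, the CTCBeamSearchDecoder doc string removed in the last hunk describes the beam-search variant, which returns the top_paths best hypotheses per batch entry. A hedged sketch under the same assumptions (TF 1.x tf.nn wrapper, not part of this commit):

# Sketch only: assumes the TF 1.x tf.nn.ctc_beam_search_decoder wrapper for
# the CTCBeamSearchDecoder op registered in this file; shapes are illustrative.
import tensorflow as tf

inputs = tf.placeholder(tf.float32, [50, 8, 28])  # (max_time, batch_size, num_classes)
sequence_length = tf.placeholder(tf.int32, [8])   # size (batch)

# decoded is a list of top_paths SparseTensor<int64, 2> values; log_probability
# has shape (batch_size x top_paths), matching the removed doc string.
decoded, log_probability = tf.nn.ctc_beam_search_decoder(
    inputs, sequence_length, beam_width=100, top_paths=3, merge_repeated=True)
best_path = tf.sparse_tensor_to_dense(decoded[0], default_value=-1)
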