Diffstat (limited to 'tensorflow/core/ops/ops.pbtxt')
-rw-r--r--  tensorflow/core/ops/ops.pbtxt  54
1 file changed, 54 insertions, 0 deletions
diff --git a/tensorflow/core/ops/ops.pbtxt b/tensorflow/core/ops/ops.pbtxt
index 468434bd28..2839575ec7 100644
--- a/tensorflow/core/ops/ops.pbtxt
+++ b/tensorflow/core/ops/ops.pbtxt
@@ -23384,6 +23384,60 @@ op {
description: "Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in\n`input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`.\n\n```python\n# a is a tensor.\n# e is a tensor of eigenvalues.\n# v is a tensor of eigenvectors.\ne, v = self_adjoint_eig(a)\ne = self_adjoint_eig(a, compute_v=False)\n```"
}
op {
+ name: "Selu"
+ input_arg {
+ name: "features"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "activations"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_HALF
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ }
+ }
+ }
+ summary: "Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)` if < 0, `scale * features` otherwise."
+ description: "See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)"
+}
+op {
+ name: "SeluGrad"
+ input_arg {
+ name: "gradients"
+ description: "The backpropagated gradients to the corresponding Selu operation."
+ type_attr: "T"
+ }
+ input_arg {
+ name: "outputs"
+ description: "The outputs of the corresponding Selu operation."
+ type_attr: "T"
+ }
+ output_arg {
+ name: "backprops"
+ description: "The gradients: `gradients * (outputs + scale * alpha)` if outputs < 0,\n`scale * gradients` otherwise."
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_HALF
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ }
+ }
+ }
+ summary: "Computes gradients for the scaled exponential linear (Selu) operation."
+}
+op {
name: "SerializeManySparse"
input_arg {
name: "sparse_indices"