author    Gunhan Gulsoy <gunan@google.com>  2017-04-05 14:05:45 -0800
committer TensorFlower Gardener <gardener@tensorflow.org>  2017-04-05 15:29:34 -0700
commit    06c622b321e7b2ebf12cb1c208649efacab71166 (patch)
tree      db53ccefd80e3abe672044fc1d08aa47fbfcff6d /tensorflow/tools/api/golden/tensorflow.train.-proximal-adagrad-optimizer.pbtxt
parent    c76241258a566b82ac3cdd57fdfaab353d89b8f1 (diff)
API backwards compatibility tests.
Change: 152310869
Diffstat (limited to 'tensorflow/tools/api/golden/tensorflow.train.-proximal-adagrad-optimizer.pbtxt')
-rw-r--r--  tensorflow/tools/api/golden/tensorflow.train.-proximal-adagrad-optimizer.pbtxt  46
1 file changed, 46 insertions, 0 deletions
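
The golden file records the serialized signature of every public symbol; the compatibility test re-serializes the live API the same way and diffs it against the checked-in golden, so any incompatible signature change fails the test. Below is a minimal sketch of that comparison for the constructor recorded in this file, assuming TF 1.x. The GOLDEN_* constants are transcribed by hand from the diff; the real harness compares the full tf_class proto rather than a hand-written pair of constants.

import inspect
import tensorflow as tf

# Expected constructor signature, transcribed from the golden pbtxt below.
GOLDEN_ARGS = ['self', 'learning_rate', 'initial_accumulator_value',
               'l1_regularization_strength', 'l2_regularization_strength',
               'use_locking', 'name']
GOLDEN_DEFAULTS = (0.1, 0.0, 0.0, False, 'ProximalAdagrad')

# Extract the live signature and compare; a mismatch means the public
# API drifted from the golden and the change needs explicit approval.
spec = inspect.getargspec(tf.train.ProximalAdagradOptimizer.__init__)
assert spec.args == GOLDEN_ARGS, 'argument names changed: %s' % (spec.args,)
assert spec.defaults == GOLDEN_DEFAULTS, 'defaults changed: %s' % (spec.defaults,)
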
diff --git a/tensorflow/tools/api/golden/tensorflow.train.-proximal-adagrad-optimizer.pbtxt b/tensorflow/tools/api/golden/tensorflow.train.-proximal-adagrad-optimizer.pbtxt
new file mode 100644
index 0000000000..571d846b6c
--- /dev/null
+++ b/tensorflow/tools/api/golden/tensorflow.train.-proximal-adagrad-optimizer.pbtxt
@@ -0,0 +1,46 @@
+path: "tensorflow.train.ProximalAdagradOptimizer"
+tf_class {
+ is_instance: "<class \'tensorflow.python.training.proximal_adagrad.ProximalAdagradOptimizer\'>"
+ is_instance: "<class \'tensorflow.python.training.optimizer.Optimizer\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "GATE_GRAPH"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "GATE_NONE"
+ mtype: "<type \'int\'>"
+ }
+ member {
+ name: "GATE_OP"
+ mtype: "<type \'int\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'learning_rate\', \'initial_accumulator_value\', \'l1_regularization_strength\', \'l2_regularization_strength\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'0.1\', \'0.0\', \'0.0\', \'False\', \'ProximalAdagrad\'], "
+ }
+ member_method {
+ name: "apply_gradients"
+ argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+ }
+ member_method {
+ name: "compute_gradients"
+ argspec: "args=[\'self\', \'loss\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'False\', \'None\'], "
+ }
+ member_method {
+ name: "get_name"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_slot"
+ argspec: "args=[\'self\', \'var\', \'name\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "get_slot_names"
+ argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "minimize"
+ argspec: "args=[\'self\', \'loss\', \'global_step\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'1\', \'None\', \'False\', \'None\', \'None\'], "
+ }
+}
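
For reference, the __init__ argspec recorded above implies the following TF 1.x usage. This is an illustrative sketch, not part of the commit: the variable and loss are made up, and only learning_rate is required, since the remaining constructor arguments default to 0.1, 0.0, 0.0, False, and 'ProximalAdagrad' as listed in the golden.

import tensorflow as tf

# Toy variable and loss, for illustration only.
w = tf.Variable([0.5, -0.3], name='w')
loss = tf.reduce_sum(tf.square(w))

# Defaults match the golden argspec; the L1 strength is overridden here.
opt = tf.train.ProximalAdagradOptimizer(
    learning_rate=0.01,
    l1_regularization_strength=0.001)
train_op = opt.minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(5):
        sess.run(train_op)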